// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		16

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
#define CQSPI_DISABLE_DAC_MODE		BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA	BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION	BIT(3)
#define CQSPI_SLOW_SRAM			BIT(4)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL		BIT(0)

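/*
 * Encode the bus width of an op phase as the 2-bit "type" value used by the
 * read/write instruction registers: 1, 2, 4 and 8 I/O lines map to 0, 1, 2
 * and 3 via ilog2(). A phase with no bytes is encoded as single I/O.
 */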
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		cs;
};

struct cqspi_st {
	struct platform_device	*pdev;
	struct spi_master	*master;
	struct clk		*clk;
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			num_chipselect;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	bool			use_direct_mode;
	bool			use_direct_mode_wr;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool			use_dma_read;
	u32			pd_dev_id;
	bool			wr_completion;
	bool			slow_sram;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u8 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS			500
#define CQSPI_READ_TIMEOUT_MS			10

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4
#define CQSPI_DUMMY_CLKS_MAX			31

#define CQSPI_STIG_DATA_LEN_MAX			8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE		0x80

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_POLLING_STATUS		0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB	16

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR		0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR		0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE		0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL		0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS		0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN		0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS		0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK	BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB	0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL	0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL	0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
#define CQSPI_DMA_UNALIGN		0x3

#define CQSPI_REG_VERSAL_DMA_VAL		0x602

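/*
 * Poll @reg until the bits in @mask are all set (clr == false) or all
 * cleared (clr == true), giving up after CQSPI_TIMEOUT_MS.
 */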
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
	u32 val;

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, CQSPI_TIMEOUT_MS * 1000);
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

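/*
 * Shared interrupt handler: read and clear the raw interrupt status. When
 * the integrated read DMA is in use, a DMA-done status completes the
 * transfer immediately; otherwise the status is masked down to the indirect
 * read/write interrupts of interest (only the watermark and write interrupts
 * with the SLOW_SRAM quirk, since the read loop runs with SRAM interrupts
 * disabled) and any remaining bit completes the transfer.
 */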
static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;
	struct device *device = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;

	ddata = of_device_get_match_data(device);

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	} else if (!cqspi->slow_sram) {
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	} else {
		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
	}

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

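/*
 * Convert the op's dummy byte count into dummy clock cycles: each byte takes
 * 8 / buswidth cycles, and in DTR mode two bits are transferred per cycle so
 * the result is halved.
 */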
static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, still busy. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without start execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Polling for completion. */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
		reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

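/*
 * STIG (software triggered instruction generator) read: execute a short
 * register-style read through the command control registers. The payload is
 * limited to CQSPI_STIG_DATA_LEN_MAX (8) bytes, returned via the two
 * command read-data registers.
 */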
static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
		     << CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);

	/* setup ADDR BIT field */
	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	/* Reset CMD_CTRL Reg once command read completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	ret = cqspi_exec_flash_cmd(cqspi, reg);

	/* Reset CMD_CTRL Reg once command write completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return ret;
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
		       << CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);

	return 0;
}

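/*
 * Indirect (PIO) read: program the flash start address and length, start the
 * indirect read engine, and drain the controller's SRAM through the AHB
 * trigger window as the watermark/SRAM-full/completion interrupts come in.
 */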
static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/*
	 * On the SoCFPGA platform reading the SRAM is slow due to a hardware
	 * limitation and causes a read interrupt storm on the CPU, so enable
	 * only the watermark interrupt here and disable all read interrupts
	 * later, as we want to run the "bytes to read" loop with all read
	 * interrupts disabled for maximum performance.
	 */
	if (!cqspi->slow_sram)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/*
		 * Disable all read interrupts until
		 * we are out of "bytes to read".
		 */
		if (cqspi->slow_sram)
			writel(0x0, reg_base + CQSPI_REG_IRQMASK);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (remaining > 0) {
			reinit_completion(&cqspi->transfer_complete);
			if (cqspi->slow_sram)
				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
		}
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

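/*
 * Versal integrated DMA read: route the controller output to the DMA via the
 * firmware-controlled OSPI mux, DMA the 4-byte-aligned part of the transfer
 * straight into the rx buffer, then fall back to an indirect (PIO) read for
 * any trailing unaligned bytes.
 */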
static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support an address phase in the Read
	 * SR command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		/*
		 * DAC mode requires auto polling, as the flash needs to be
		 * polled for write completion in case of a bubble in the SPI
		 * transaction due to a slow CPU or DMA master.
		 */
		cqspi->use_direct_mode_wr = false;
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);

	return 0;
}

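/*
 * Indirect (PIO) write: program the flash start address and length, start
 * the indirect write engine, push the data through the AHB trigger window in
 * 4-byte words (padding the final partial word), and wait for the
 * write-completion interrupts.
 */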
static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of cycles of
	 * QSPI_REF_CLK delay is required for the above bit to
	 * be internally synchronized by the QSPI module. Provide 5
	 * cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

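/*
 * Convert a delay in nanoseconds into reference-clock ticks, rounding up:
 * ticks = DIV_ROUND_UP((ref_clk_hz / 1000) * ns, 1000000), which
 * approximates ceil(ref_clk_hz * ns / 1e9) while keeping the intermediate
 * product within 32 bits for typical reference clocks and delays.
 */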
static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

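/*
 * The serial clock is derived from the reference clock as
 * sclk = ref_clk / (2 * (div + 1)), so the divisor field is
 * div = DIV_ROUND_UP(ref_clk, 2 * sclk) - 1, clamped to the 4-bit maximum.
 */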
static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	/* Maximum baud divisor */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
		div = CQSPI_REG_CONFIG_BAUD_MASK;
		dev_warn(&cqspi->pdev->dev,
			 "Unable to adjust clock <= %d hz. Reduced to %d hz\n",
			 cqspi->sclk, ref_clk_hz / ((div + 1) * 2));
	}

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes
	 * to the flash when it is polling the write completion register in
	 * DTR mode. So, we can not use direct mode when in DTR mode for
	 * writing data.
	 */
	if (!op->cmd.dtr && cqspi->use_direct_mode &&
	    cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

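/*
 * Direct-mode read: the flash is memory mapped through the AHB window, so a
 * read is just a copy from that window. When a memcpy-capable dmaengine
 * channel was acquired and the buffer is DMA-able, offload the copy to it;
 * otherwise fall back to memcpy_fromio().
 */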
static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

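/*
 * Read dispatch: use the memory-mapped (direct) path when the whole range
 * fits in the AHB window, the SoC's integrated read DMA when available and
 * the buffer is suitably aligned, and the indirect (PIO) engine otherwise.
 */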
static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ddata = of_device_get_match_data(dev);

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
	cqspi_configure(f_pdata, mem->spi->max_speed_hz);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		/*
		 * Performing reads in DAC mode forces the controller to read
		 * a minimum of 4 bytes, which some flash devices do not
		 * support during register reads; prefer STIG mode for such
		 * small reads.
		 */
		if (!op->addr.nbytes ||
		    op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = cqspi_mem_process(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of an op phase that has zero
	 * nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

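/*
 * One-time controller setup: clear the remap register, mask all interrupts,
 * split the SRAM 1:1 between read and write, program the indirect trigger
 * address and the read/write watermarks, disable the direct access
 * controller when direct mode is not used, and enable the DMA interface when
 * the integrated read DMA is used.
 */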
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	cqspi_controller_enable(cqspi, 1);
}

static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static const struct spi_controller_mem_caps cqspi_mem_caps = {
	.dtr = true,
};

2020-06-01 10:04:43 +03:00
static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret;

	/* Get flash device data */
	for_each_available_child_of_node(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			of_node_put(np);
			return ret;
		}

		if (cs >= CQSPI_MAX_CHIPSELECT) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			of_node_put(np);
			return -EINVAL;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	return 0;
}
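/*
 * Probe order: parse the controller OF properties, claim the clock and the
 * register/AHB windows, enable runtime PM and the reference clock, pulse
 * the optional resets, apply per-SoC quirks, request the IRQ, initialise
 * the controller, parse the flash child nodes and finally register the
 * SPI master.
 */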
static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp;
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	int ret;
	int irq;

	master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
	if (!master) {
		dev_err(&pdev->dev, "spi_alloc_master failed\n");
		return -ENOMEM;
	}
	master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	master->mem_ops = &cqspi_mem_ops;
	master->mem_caps = &cqspi_mem_caps;
	master->dev.of_node = pdev->dev.of_node;

	cqspi = spi_master_get_devdata(master);

	cqspi->pdev = pdev;
	cqspi->master = master;
	platform_set_drvdata(pdev, cqspi);
	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
		return ret;
	}

	/* Obtain and remap controller address. */
	cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		return ret;
	}

	/* Obtain and remap AHB address. */
	cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		return ret;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;
	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto probe_pm_failed;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}
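	/*
	 * Assert and immediately deassert the optional "qspi" and "qspi-ocp"
	 * resets below so the controller is configured from a known state.
	 */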
	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto probe_reset_failed;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto probe_reset_failed;
	}

	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);
	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	master->max_speed_hz = cqspi->master_ref_clk_hz;

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	ddata = of_device_get_match_data(dev);
	if (ddata) {
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
						cqspi->master_ref_clk_hz);
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
			cqspi->use_direct_mode = true;
			cqspi->use_direct_mode_wr = true;
		}
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;
		if (ddata->quirks & CQSPI_SLOW_SRAM)
			cqspi->slow_sram = true;

		if (of_device_is_compatible(pdev->dev.of_node,
					    "xlnx,versal-ospi-1.0"))
			dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	}
	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_reset_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	master->num_chipselect = cqspi->num_chipselect;

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		goto probe_setup_failed;
	}

	if (cqspi->use_direct_mode) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER)
			goto probe_setup_failed;
	}

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto probe_setup_failed;
	}

	return 0;
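	/* Error unwind: undo the steps above in reverse order of acquisition. */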
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	pm_runtime_put_sync(dev);
probe_pm_failed:
	pm_runtime_disable(dev);
	return ret;
}
static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	spi_unregister_master(cqspi->master);
	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	clk_disable_unprepare(cqspi->clk);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
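/*
 * System sleep support below only toggles the controller's global enable
 * bit; no register state is saved or restored here.
 */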
#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	return 0;
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 1);
	return 0;
}

static const struct dev_pm_ops cqspi__dev_pm_ops = {
	.suspend = cqspi_suspend,
	.resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
#else
#define CQSPI_DEV_PM_OPS	NULL
#endif
static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE
			| CQSPI_NO_SUPPORT_WR_COMPLETION
			| CQSPI_SLOW_SRAM,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};
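/* Map each supported compatible string to its per-SoC platdata above. */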
static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = CQSPI_DEV_PM_OPS,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");