// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG				0x0
#define PCIE_CORE_CMD_STATUS_REG			0x4
#define     PCIE_CORE_CMD_IO_ACCESS_EN			BIT(0)
#define     PCIE_CORE_CMD_MEM_ACCESS_EN			BIT(1)
#define     PCIE_CORE_CMD_MEM_IO_REQ_EN			BIT(2)
#define PCIE_CORE_DEV_REV_REG				0x8
#define PCIE_CORE_PCIEXP_CAP				0xc0
#define PCIE_CORE_ERR_CAPCTL_REG			0x118
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX		BIT(5)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN		BIT(6)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK		BIT(7)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV		BIT(8)
#define PCIE_CORE_INT_A_ASSERT_ENABLE			1
#define PCIE_CORE_INT_B_ASSERT_ENABLE			2
#define PCIE_CORE_INT_C_ASSERT_ENABLE			3
#define PCIE_CORE_INT_D_ASSERT_ENABLE			4

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR				0x4000
#define PIO_CTRL				(PIO_BASE_ADDR + 0x0)
#define   PIO_CTRL_TYPE_MASK			GENMASK(3, 0)
#define   PIO_CTRL_ADDR_WIN_DISABLE		BIT(24)
#define PIO_STAT				(PIO_BASE_ADDR + 0x4)
#define   PIO_COMPLETION_STATUS_SHIFT		7
#define   PIO_COMPLETION_STATUS_MASK		GENMASK(9, 7)
#define   PIO_COMPLETION_STATUS_OK		0
#define   PIO_COMPLETION_STATUS_UR		1
#define   PIO_COMPLETION_STATUS_CRS		2
#define   PIO_COMPLETION_STATUS_CA		4
#define   PIO_NON_POSTED_REQ			BIT(10)
#define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB			(PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA				(PIO_BASE_ADDR + 0x18)
#define PIO_START				(PIO_BASE_ADDR + 0x1c)
#define PIO_ISR					(PIO_BASE_ADDR + 0x20)
#define PIO_ISRM				(PIO_BASE_ADDR + 0x24)

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR			0x4800
#define PCIE_CORE_CTRL0_REG			(CONTROL_BASE_ADDR + 0x0)
#define     PCIE_GEN_SEL_MSK			0x3
#define     PCIE_GEN_SEL_SHIFT			0x0
#define     SPEED_GEN_1				0
#define     SPEED_GEN_2				1
#define     SPEED_GEN_3				2
#define     IS_RC_MSK				1
#define     IS_RC_SHIFT				2
#define     LANE_CNT_MSK			0x18
#define     LANE_CNT_SHIFT			0x3
#define     LANE_COUNT_1			(0 << LANE_CNT_SHIFT)
#define     LANE_COUNT_2			(1 << LANE_CNT_SHIFT)
#define     LANE_COUNT_4			(2 << LANE_CNT_SHIFT)
#define     LANE_COUNT_8			(3 << LANE_CNT_SHIFT)
#define     LINK_TRAINING_EN			BIT(6)
#define     LEGACY_INTA				BIT(28)
#define     LEGACY_INTB				BIT(29)
#define     LEGACY_INTC				BIT(30)
#define     LEGACY_INTD				BIT(31)
#define PCIE_CORE_CTRL1_REG			(CONTROL_BASE_ADDR + 0x4)
#define     HOT_RESET_GEN			BIT(0)
#define PCIE_CORE_CTRL2_REG			(CONTROL_BASE_ADDR + 0x8)
#define     PCIE_CORE_CTRL2_RESERVED		0x7
#define     PCIE_CORE_CTRL2_TD_ENABLE		BIT(4)
#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	BIT(5)
#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE	BIT(6)
#define     PCIE_CORE_CTRL2_MSI_ENABLE		BIT(10)
#define PCIE_CORE_REF_CLK_REG			(CONTROL_BASE_ADDR + 0x14)
#define     PCIE_CORE_REF_CLK_TX_ENABLE		BIT(1)
#define PCIE_MSG_LOG_REG			(CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG				(CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK			BIT(7)
#define PCIE_ISR0_MASK_REG			(CONTROL_BASE_ADDR + 0x44)
#define     PCIE_ISR0_MSI_INT_PENDING		BIT(24)
#define     PCIE_ISR0_INTX_ASSERT(val)		BIT(16 + (val))
#define     PCIE_ISR0_INTX_DEASSERT(val)	BIT(20 + (val))
#define     PCIE_ISR0_ALL_MASK			GENMASK(26, 0)
#define PCIE_ISR1_REG				(CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
#define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
#define     PCIE_ISR1_FLUSH			BIT(5)
#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
#define     PCIE_ISR1_ALL_MASK			GENMASK(11, 4)
#define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG			(CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG			(CONTROL_BASE_ADDR + 0x9C)

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR				0x6000
#define CFG_REG					(LMI_BASE_ADDR + 0x0)
#define     LTSSM_SHIFT				24
#define     LTSSM_MASK				0x3f
#define     LTSSM_L0				0x10
#define     RC_BAR_CONFIG			0x300
#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR			0x18000
#define CTRL_CONFIG_REG				(CTRL_CORE_BASE_ADDR + 0x0)
#define     CTRL_MODE_SHIFT			0x0
#define     CTRL_MODE_MASK			0x1
#define     PCIE_CORE_MODE_DIRECT		0x0
#define     PCIE_CORE_MODE_COMMAND		0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR			0x1b000
#define HOST_CTRL_INT_STATUS_REG		(CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG			(CENTRAL_INT_BASE_ADDR + 0x4)
#define     PCIE_IRQ_CMDQ_INT			BIT(0)
#define     PCIE_IRQ_MSI_STATUS_INT		BIT(1)
#define     PCIE_IRQ_CMD_SENT_DONE		BIT(3)
#define     PCIE_IRQ_DMA_INT			BIT(4)
#define     PCIE_IRQ_IB_DXFERDONE		BIT(5)
#define     PCIE_IRQ_OB_DXFERDONE		BIT(6)
#define     PCIE_IRQ_OB_RXFERDONE		BIT(7)
#define     PCIE_IRQ_COMPQ_INT			BIT(12)
#define     PCIE_IRQ_DIR_RD_DDR_DET		BIT(13)
#define     PCIE_IRQ_DIR_WR_DDR_DET		BIT(14)
#define     PCIE_IRQ_CORE_INT			BIT(16)
#define     PCIE_IRQ_CORE_INT_PIO		BIT(17)
#define     PCIE_IRQ_DPMU_INT			BIT(18)
#define     PCIE_IRQ_PCIE_MIS_INT		BIT(19)
#define     PCIE_IRQ_MSI_INT1_DET		BIT(20)
#define     PCIE_IRQ_MSI_INT2_DET		BIT(21)
#define     PCIE_IRQ_RC_DBELL_DET		BIT(22)
#define     PCIE_IRQ_EP_STATUS			BIT(23)
#define     PCIE_IRQ_ALL_MASK			0xfff0fb
#define     PCIE_IRQ_ENABLE_INTS_MASK		PCIE_IRQ_CORE_INT

/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0			0x8
#define PCIE_CONFIG_RD_TYPE1			0x9
#define PCIE_CONFIG_WR_TYPE0			0xa
#define PCIE_CONFIG_WR_TYPE1			0xb

#define PIO_RETRY_CNT				500
#define PIO_RETRY_DELAY				2 /* 2 us */

#define LINK_WAIT_MAX_RETRIES			10
#define LINK_WAIT_USLEEP_MIN			90000
#define LINK_WAIT_USLEEP_MAX			100000

#define RETRAIN_WAIT_MAX_RETRIES		10
#define RETRAIN_WAIT_USLEEP_US			2000

#define MSI_IRQ_NUM				32

struct advk_pcie {
	struct platform_device *pdev;
	void __iomem *base;
	struct irq_domain *irq_domain;
	struct irq_chip irq_chip;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_inner_domain;
	struct irq_chip msi_bottom_irq_chip;
	struct irq_chip msi_irq_chip;
	struct msi_domain_info msi_domain_info;
	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
	struct mutex msi_used_lock;
	u16 msi_msg;
	int link_gen;
	struct pci_bridge_emul bridge;
	struct gpio_desc *reset_gpio;
	struct phy *phy;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
	writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
	return readl(pcie->base + reg);
}

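/*
 * The PCIe core registers are accessed as aligned 32-bit words, so a
 * 16-bit field (e.g. PCI_EXP_LNKSTA within the PCIe capability) is
 * extracted by reading the containing dword and shifting, for example:
 *
 *	lnksta = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
 */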
static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
{
	return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
}

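/*
 * The link is reported up once the LTSSM state read from CFG_REG has
 * reached L0 (0x10) or any later state.
 */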
static int advk_pcie_link_up(struct advk_pcie *pcie)
{
	u32 val, ltssm_state;

	val = advk_readl(pcie, CFG_REG);
	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
	return ltssm_state >= LTSSM_L0;
}

static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (advk_pcie_link_up(pcie))
			return 0;

		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
	size_t retries;

	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
		if (!advk_pcie_link_up(pcie))
			break;

		udelay(RETRAIN_WAIT_USLEEP_US);
	}
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
	u32 reg;

	if (!pcie->reset_gpio)
		return;

	/*
	 * As required by the PCI Express spec (PCI Express Base
	 * Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
	 * Conventional Reset), a delay of at least 100 ms is needed after
	 * de-asserting the PERST# signal before link training is enabled.
	 * So make sure that link training is disabled prior to de-asserting
	 * the PERST# signal to fulfill that PCI Express spec requirement.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LINK_TRAINING_EN;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* 10ms delay is needed for some cards */
	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
{
	int ret, neg_gen;
	u32 reg;

	/* Setup link speed */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~PCIE_GEN_SEL_MSK;
	if (gen == 3)
		reg |= SPEED_GEN_3;
	else if (gen == 2)
		reg |= SPEED_GEN_2;
	else
		reg |= SPEED_GEN_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Enable link training. This is not needed in every call to this
	 * function, just once suffices, but it does not break anything either.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= LINK_TRAINING_EN;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Start link training immediately after enabling it.
	 * This solves problems for some buggy cards.
	 */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
	reg |= PCI_EXP_LNKCTL_RL;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);

	ret = advk_pcie_wait_for_link(pcie);
	if (ret)
		return ret;

	reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
	neg_gen = reg & PCI_EXP_LNKSTA_CLS;

	return neg_gen;
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	int neg_gen = -1, gen;

	/*
	 * Reset PCIe card via PERST# signal. Some cards are not detected
	 * during link training when they are in some non-initial state.
	 */
	advk_pcie_issue_perst(pcie);

	/*
	 * PERST# signal could have been asserted by pinctrl subsystem before
	 * probe() callback has been called or issued explicitly by reset gpio
	 * function advk_pcie_issue_perst(), putting the endpoint into
	 * fundamental reset. As required by the PCI Express spec, a delay of
	 * at least 100 ms is needed after such a reset before link training
	 * can start.
	 */
	msleep(PCI_PM_D3COLD_WAIT);

	/*
	 * Try link training at link gen specified by device tree property
	 * 'max-link-speed'. If this fails, iteratively train at lower gen.
	 */
	for (gen = pcie->link_gen; gen > 0; --gen) {
		neg_gen = advk_pcie_train_at_gen(pcie, gen);
		if (neg_gen > 0)
			break;
	}

	if (neg_gen < 0)
		goto err;

	/*
	 * After successful training, if the negotiated gen is lower than
	 * requested, train again at the negotiated gen. This solves some
	 * stability issues for some buggy gen1 cards.
	 */
	if (neg_gen < gen) {
		gen = neg_gen;
		neg_gen = advk_pcie_train_at_gen(pcie, gen);
	}

	if (neg_gen == gen) {
		dev_info(dev, "link up at gen %i\n", gen);
		return;
	}

err:
	dev_err(dev, "link never came up\n");
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
	u32 reg;

	/* Enable TX */
	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
	reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

	/* Set to Direct mode */
	reg = advk_readl(pcie, CTRL_CONFIG_REG);
	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
	reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
	advk_writel(pcie, reg, CTRL_CONFIG_REG);

	/* Set PCI global control register to RC mode */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= (IS_RC_MSK << IS_RC_SHIFT);
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Replace incorrect PCI vendor id value 0x1b4b with the correct
	 * value 0x11ab. VENDOR_ID_REG contains the vendor id in the low 16
	 * bits and the subsystem vendor id in the high 16 bits. Updating
	 * this register changes the readback value of the read-only vendor
	 * id bits in the PCIE_CORE_DEV_ID_REG register. Workaround for
	 * erratum 4.1: "The value of device and vendor ID is incorrect".
	 */
	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
	advk_writel(pcie, reg, VENDOR_ID_REG);

	/* Set Advanced Error Capabilities and Control PF0 register */
	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
	advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

	/* Set PCIe Device Control register */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	reg &= ~PCI_EXP_DEVCTL_READRQ;
	reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */
	reg |= PCI_EXP_DEVCTL_READRQ_512B;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

	/* Program PCIe Control 2 to disable strict ordering */
	reg = PCIE_CORE_CTRL2_RESERVED |
		PCIE_CORE_CTRL2_TD_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Set lane X1 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LANE_CNT_MSK;
	reg |= LANE_COUNT_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* Enable MSI */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Disable All ISR0/1 Sources */
	reg = PCIE_ISR0_ALL_MASK;
	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);

	/* Unmask all MSIs */
	advk_writel(pcie, 0, PCIE_MSI_MASK_REG);

	/* Enable summary interrupt for GIC SPI source */
	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
	advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Bypass the address window mapping for PIO */
	reg = advk_readl(pcie, PIO_CTRL);
	reg |= PIO_CTRL_ADDR_WIN_DISABLE;
	advk_writel(pcie, reg, PIO_CTRL);

	advk_pcie_train_link(pcie);

	/*
	 * FIXME: The following register update is suspicious. This register is
	 * applicable only when the PCI controller is configured for Endpoint
	 * mode, not as a Root Complex. But apparently when this code is
	 * removed, some cards stop working. This should be investigated and
	 * a comment explaining this should be put here.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
		PCIE_CORE_CMD_IO_ACCESS_EN |
		PCIE_CORE_CMD_MEM_IO_REQ_EN;
	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}

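/*
 * Decode the completion status of the last PIO transfer from PIO_STAT:
 * Unsupported Request (UR), Configuration Request Retry Status (CRS) and
 * Completer Abort (CA) are reported; anything else is logged as unknown.
 */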
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	unsigned int status;
	char *strcomp_status, *str_posted;

	reg = advk_readl(pcie, PIO_STAT);
	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
		PIO_COMPLETION_STATUS_SHIFT;

	if (!status)
		return;

	switch (status) {
	case PIO_COMPLETION_STATUS_UR:
		strcomp_status = "UR";
		break;
	case PIO_COMPLETION_STATUS_CRS:
		strcomp_status = "CRS";
		break;
	case PIO_COMPLETION_STATUS_CA:
		strcomp_status = "CA";
		break;
	default:
		strcomp_status = "Unknown";
		break;
	}

	if (reg & PIO_NON_POSTED_REQ)
		str_posted = "Non-posted";
	else
		str_posted = "Posted";

	dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}

static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	int i;

	for (i = 0; i < PIO_RETRY_CNT; i++) {
		u32 start, isr;

		start = advk_readl(pcie, PIO_START);
		isr = advk_readl(pcie, PIO_ISR);
		if (!start && isr)
			return 0;
		udelay(PIO_RETRY_DELAY);
	}

	dev_err(dev, "PIO read/write transfer time out\n");
	return -ETIMEDOUT;
}

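/*
 * Config space reads for the emulated root port bridge: accesses to the
 * PCIe capability are mostly serviced from the controller's own registers
 * at PCIE_CORE_PCIEXP_CAP, while slot, root and link-training status bits
 * are synthesized here.
 */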
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_EXP_SLTCTL:
		*value = PCI_EXP_SLTSTA_PDS << 16;
		return PCI_BRIDGE_EMUL_HANDLED;

	case PCI_EXP_RTCTL: {
		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
		*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_RTSTA: {
		u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
		u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
		*value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_LNKCTL: {
		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
			~(PCI_EXP_LNKSTA_LT << 16);
		if (!advk_pcie_link_up(pcie))
			val |= (PCI_EXP_LNKSTA_LT << 16);
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_CAP_LIST_ID:
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_LNKCAP:
		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
		return PCI_BRIDGE_EMUL_HANDLED;
	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}

static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		break;

	case PCI_EXP_LNKCTL:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		if (new & PCI_EXP_LNKCTL_RL)
			advk_pcie_wait_for_retrain(pcie);
		break;

	case PCI_EXP_RTCTL: {
		/* Only mask/unmask PME interrupt */
		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
			~PCIE_MSG_PM_PME_MASK;
		if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
			val |= PCIE_MSG_PM_PME_MASK;
		advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
		break;
	}

	case PCI_EXP_RTSTA:
		new = (new & PCI_EXP_RTSTA_PME) >> 9;
		advk_writel(pcie, new, PCIE_ISR0_REG);
		break;

	default:
		break;
	}
}

static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
	.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
	struct pci_bridge_emul *bridge = &pcie->bridge;

	bridge->conf.vendor =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
	bridge->conf.device =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
	bridge->conf.class_revision =
		cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

	/* Support 32 bits I/O addressing */
	bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
	bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

	/* Support 64 bits memory pref */
	bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

	/* Support interrupt A for MSI feature */
	bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;

	bridge->has_pcie = true;
	bridge->data = pcie;
	bridge->ops = &advk_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, 0);
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
				   int devfn)
{
	if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
		return false;

	/*
	 * If the link goes down after we check for link-up, nothing bad
	 * happens but the config access times out.
	 */
	if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
		return false;

	return true;
}

static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	/*
	 * Trying to start a new PIO transfer when a previous one has not
	 * completed causes an External Abort on the CPU, which results in a
	 * kernel panic:
	 *
	 *	SError Interrupt on CPU0, code 0xbf000002 -- SError
	 *	Kernel panic - not syncing: Asynchronous SError Interrupt
	 *
	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
	 * concurrent calls at the same time. But because a PIO transfer may
	 * take about 1.5s when the link is down or the card is disconnected,
	 * advk_pcie_wait_pio() does not always wait until completion.
	 *
	 * Some versions of ARM Trusted Firmware handle this External Abort at
	 * EL3 level and mask it to prevent a kernel panic. Relevant TF-A commit:
	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
	 */
	if (advk_readl(pcie, PIO_START)) {
		dev_err(dev, "Previous PIO read/write transfer is still running\n");
		return true;
	}

	return false;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 *val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn)) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_read(&pcie->bridge, where,
						 size, val);

	if (advk_pcie_pio_is_running(pcie)) {
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_RD_TYPE0;
	else
		reg |= PCIE_CONFIG_RD_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Program the data strobe */
	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

	/* Clear PIO DONE ISR and start the transfer */
	advk_writel(pcie, 1, PIO_ISR);
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0) {
		*val = 0xffffffff;
		return PCIBIOS_SET_FAILED;
	}

	advk_pcie_check_pio_status(pcie);

	/* Get the read result */
	*val = advk_readl(pcie, PIO_RD_DATA);
	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	u32 data_strobe = 0x0;
	int offset;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_write(&pcie->bridge, where,
						  size, val);

	if (where % size)
		return PCIBIOS_SET_FAILED;

	if (advk_pcie_pio_is_running(pcie))
		return PCIBIOS_SET_FAILED;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_WR_TYPE0;
	else
		reg |= PCIE_CONFIG_WR_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Calculate the write strobe */
	offset = where & 0x3;
	reg = val << (8 * offset);
	data_strobe = GENMASK(size - 1, 0) << offset;

	/* Program the data register */
	advk_writel(pcie, reg, PIO_WR_DATA);

	/* Program the data strobe */
	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

	/* Clear PIO DONE ISR and start the transfer */
	advk_writel(pcie, 1, PIO_ISR);
	advk_writel(pcie, 1, PIO_START);

	ret = advk_pcie_wait_pio(pcie);
	if (ret < 0)
		return PCIBIOS_SET_FAILED;

	advk_pcie_check_pio_status(pcie);

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops advk_pcie_ops = {
	.read = advk_pcie_rd_conf,
	.write = advk_pcie_wr_conf,
};

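/*
 * MSI doorbell: the MSI message address is the physical address of the
 * driver's msi_msg field (programmed into PCIE_MSI_ADDR_LOW/HIGH_REG in
 * advk_pcie_init_msi_irq_domain()), and the message data carries the
 * Linux virq number that is demultiplexed in advk_pcie_handle_msi().
 */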
static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
					 struct msi_msg *msg)
{
	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);

	msg->address_lo = lower_32_bits(msi_msg);
	msg->address_hi = upper_32_bits(msi_msg);
	msg->data = data->irq;
}

static int advk_msi_set_affinity(struct irq_data *irq_data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

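/*
 * Allocate a contiguous block of hardware MSI vectors from the msi_used
 * bitmap, as required for multi-MSI support (MSI_FLAG_MULTI_PCI_MSI).
 */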
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct advk_pcie *pcie = domain->host_data;
	int hwirq, i;

	mutex_lock(&pcie->msi_used_lock);
	hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
					   0, nr_irqs, 0);
	if (hwirq >= MSI_IRQ_NUM) {
		mutex_unlock(&pcie->msi_used_lock);
		return -ENOSPC;
	}

	bitmap_set(pcie->msi_used, hwirq, nr_irqs);
	mutex_unlock(&pcie->msi_used_lock);

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &pcie->msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);

	return hwirq;
}

static void advk_msi_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct advk_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->msi_used_lock);
	bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
	mutex_unlock(&pcie->msi_used_lock);
}

static const struct irq_domain_ops advk_msi_domain_ops = {
	.alloc = advk_msi_irq_domain_alloc,
	.free = advk_msi_irq_domain_free,
};

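/*
 * Legacy INTx interrupts are masked and unmasked via the per-INTx assert
 * bits in PCIE_ISR1_MASK_REG.
 */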
static void advk_pcie_irq_mask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	u32 mask;

	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
}

static void advk_pcie_irq_unmask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	u32 mask;

	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
}

static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	advk_pcie_irq_mask(irq_get_irq_data(virq));
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct irq_chip *bottom_ic, *msi_ic;
	struct msi_domain_info *msi_di;
	phys_addr_t msi_msg_phys;

	mutex_init(&pcie->msi_used_lock);

	bottom_ic = &pcie->msi_bottom_irq_chip;

	bottom_ic->name = "MSI";
	bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
	bottom_ic->irq_set_affinity = advk_msi_set_affinity;

	msi_ic = &pcie->msi_irq_chip;
	msi_ic->name = "advk-MSI";

	msi_di = &pcie->msi_domain_info;
	msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		MSI_FLAG_MULTI_PCI_MSI;
	msi_di->chip = msi_ic;

	msi_msg_phys = virt_to_phys(&pcie->msi_msg);

	advk_writel(pcie, lower_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, upper_32_bits(msi_msg_phys),
		    PCIE_MSI_ADDR_HIGH_REG);

	pcie->msi_inner_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_msi_domain_ops, pcie);
	if (!pcie->msi_inner_domain)
		return -ENOMEM;

	pcie->msi_domain =
		pci_msi_create_irq_domain(of_node_to_fwnode(node),
					  msi_di, pcie->msi_inner_domain);
	if (!pcie->msi_domain) {
		irq_domain_remove(pcie->msi_inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->msi_domain);
	irq_domain_remove(pcie->msi_inner_domain);
}

static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;
	int ret = 0;

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;

	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_mask_ack = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		ret = -ENOMEM;
		goto out_put_node;
	}

out_put_node:
	of_node_put(pcie_intc_node);
	return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}

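/*
 * Demultiplex pending MSIs: each set, unmasked bit in PCIE_MSI_STATUS_REG
 * is acknowledged individually, then the virq carried in the MSI payload
 * register is handed to the generic IRQ layer.
 */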
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;
	u16 msi_data;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ~msi_mask;

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
		generic_handle_irq(msi_data);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}

static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i, virq;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	if (!isr0_status && !isr1_status) {
		advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
		advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
		return;
	}

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		virq = irq_find_mapping(pcie->irq_domain, i);
		generic_handle_irq(virq);
	}
}

static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}

static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}

static int advk_pcie_enable_phy(struct advk_pcie *pcie)
{
	int ret;

	if (!pcie->phy)
		return 0;

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
	} else if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	return 0;
}

static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;

	pcie->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
		return PTR_ERR(pcie->phy);

	/* Old bindings miss the PHY handle */
	if (IS_ERR(pcie->phy)) {
		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
		pcie->phy = NULL;
		return 0;
	}

	ret = advk_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

	return ret;
}

static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct pci_host_bridge *bridge;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
						       "reset-gpios", 0,
						       GPIOD_OUT_LOW,
						       "pcie1-reset");
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret == -ENOENT) {
			pcie->reset_gpio = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get reset-gpio: %i\n",
					ret);
			return ret;
		}
	}

	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	ret = advk_pcie_setup_phy(pcie);
	if (ret)
		return ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_sw_pci_bridge_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to register emulated root PCI bridge\n");
		return ret;
	}

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	bridge->sysdata = pcie;
	bridge->ops = &advk_pcie_ops;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}

static int advk_pcie_remove(struct platform_device *pdev)
{
	struct advk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	advk_pcie_remove_msi_irq_domain(pcie);
	advk_pcie_remove_irq_domain(pcie);

	return 0;
}

static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
	},
	.probe = advk_pcie_probe,
	.remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");