// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		(class << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		((val & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
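
/*
 * Each translation table entry maps a power-of-two-sized CPU address
 * window onto a PCI address. PCIE_ATR_SIZE() takes log2 of the window
 * size: for example, a 1 MB window is programmed as PCIE_ATR_SIZE(20),
 * since 2^20 = 1 MB; see mtk_pcie_set_trans_table().
 */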

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
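
/*
 * PCIE_MSI_SET_NUM (8) sets of PCIE_MSI_IRQS_PER_SET (32) vectors each
 * give PCIE_MSI_IRQS_NUM = 256 MSI vectors in total.
 */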

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
					int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;
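
	/*
	 * Compose the byte-enable mask for the partial access: e.g. a
	 * 2-byte access at where = 0x2 yields bytes = 0b1100, selecting
	 * the upper half of the 32-bit word.
	 */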
	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}
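
/*
 * The target BDF is selected through PCIE_CFGNUM_REG (programmed in
 * mtk_pcie_config_tlp_header() above); the window at PCIE_CFG_OFFSET_ADDR
 * then gives access to the selected function's configuration space.
 */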

static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;

	return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}
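
/*
 * Config writes are issued as full 32-bit accesses qualified by the
 * byte-enable field set up in mtk_pcie_config_tlp_header(), so sub-word
 * data must first be shifted into its byte lane within the word.
 */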

static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};

static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}

static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}

static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &mtk_msi_irq_chip,
};

static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;
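
	/*
	 * Hardware vector numbers are laid out as set_idx * 32 + bit; the
	 * MSI data programmed into the device is the index within the set.
	 */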
	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack		= mtk_msi_bottom_irq_ack,
	.irq_mask		= mtk_msi_bottom_irq_mask,
	.irq_unmask		= mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg	= mtk_compose_msi_msg,
	.irq_set_affinity	= mtk_pcie_set_affinity,
	.name			= "MSI",
};

static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&pcie->lock);
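
	/*
	 * Multi-MSI blocks must be naturally aligned, hence the
	 * order_base_2() region allocation: e.g. a request for three
	 * vectors reserves a four-vector, four-aligned region.
	 */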
	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
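
/*
 * Domain hierarchy: the bottom domain above owns the hardware vectors,
 * while the PCI/MSI domain stacked on top of it in
 * mtk_pcie_init_irq_domains() manages the per-device MSI descriptors.
 */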

static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain
 * until the corresponding de-assert message is received; hence the
 * status can only be cleared once the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask		= mtk_intx_mask,
	.irq_unmask		= mtk_intx_unmask,
	.irq_eoi		= mtk_intx_eoi,
	.irq_set_affinity	= mtk_pcie_set_affinity,
	.name			= "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		return -ENODEV;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
				  &mtk_msi_bottom_domain_ops, pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);

	return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}

static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
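
	/*
	 * Re-read the status register until no enabled vector remains
	 * pending, so MSIs raised while earlier ones are being serviced
	 * are not lost.
	 */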
	do {
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
		}
	} while (true);
}

static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
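
	/* INTx status occupies bits 27:24; per-set MSI summary bits occupy 15:8 */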
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

	return 0;
}

static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int ret;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
	if (!regs)
		return -EINVAL;

	pcie->base = devm_ioremap_resource(dev, regs);
	if (IS_ERR(pcie->base)) {
		dev_err(dev, "failed to map register base\n");
		return PTR_ERR(pcie->base);
	}

	pcie->reg_base = regs->start;

	pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
	if (IS_ERR(pcie->phy_reset)) {
		ret = PTR_ERR(pcie->phy_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY reset\n");

		return ret;
	}

	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
	if (IS_ERR(pcie->mac_reset)) {
		ret = PTR_ERR(pcie->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n");

		return ret;
	}

	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n");

		return ret;
	}

	pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
	if (pcie->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n");
		return pcie->num_clks;
	}

	return 0;
}

static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(pcie->phy_reset);

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_assert(pcie->phy_reset);

	return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_assert(pcie->phy_reset);
}

static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_gen3_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	platform_set_drvdata(pdev, pcie);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = &mtk_pcie_ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err) {
		mtk_pcie_irq_teardown(pcie);
		mtk_pcie_power_down(pcie);
		return err;
	}

	return 0;
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		writel_relaxed(msi_set->saved_irq_state,
			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}

static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
	u32 val;

	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

	/* Check that the link has entered the L2 idle state */
	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				   50 * USEC_PER_MSEC);
}

static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;
	u32 val;

	/* Trigger link to L2 state */
	err = mtk_pcie_turn_off_link(pcie);
	if (err) {
		dev_err(pcie->dev, "cannot enter L2 state\n");
		return err;
	}

	/* Pull down the PERST# pin */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	dev_dbg(pcie->dev, "entered L2 state successfully");

	mtk_pcie_irq_save(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}

static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	err = mtk_pcie_startup_port(pcie);
	if (err) {
		mtk_pcie_power_down(pcie);
		return err;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				      mtk_pcie_resume_noirq)
};

static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);

static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");