// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Freescale Layerscape SoCs
 *
 * Copyright (C) 2014 Freescale Semiconductor.
 * Copyright 2021 NXP
 *
 * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
 */
2023-08-21 14:48:15 -04:00
# include <linux/delay.h>
2014-11-05 16:45:11 +08:00
# include <linux/kernel.h>
# include <linux/interrupt.h>
2016-07-02 19:13:27 -04:00
# include <linux/init.h>
2023-08-21 14:48:15 -04:00
# include <linux/iopoll.h>
2014-11-05 16:45:11 +08:00
# include <linux/of_pci.h>
# include <linux/of_platform.h>
# include <linux/of_address.h>
# include <linux/pci.h>
# include <linux/platform_device.h>
# include <linux/resource.h>
# include <linux/mfd/syscon.h>
# include <linux/regmap.h>
2023-08-21 14:48:15 -04:00
# include "../../pci.h"
2014-11-05 16:45:11 +08:00
# include "pcie-designware.h"
2015-10-16 15:19:19 +08:00
/* PEX Internal Configuration Registers */
# define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
2017-10-12 17:44:48 +08:00
# define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
# define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
2015-10-16 15:19:19 +08:00
2023-08-21 14:48:15 -04:00
/* PF Message Command Register */
# define LS_PCIE_PF_MCR 0x2c
# define PF_MCR_PTOMR BIT(0)
# define PF_MCR_EXL2S BIT(1)
2023-12-04 11:08:27 -05:00
/* LS1021A PEXn PM Write Control Register */
# define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64)
# define PMXMTTURNOFF BIT(31)
# define SCFG_PEXSFTRSTCR 0x190
# define PEXSR(idx) BIT(idx)
2023-12-04 11:08:29 -05:00
/* LS1043A PEX PME control register */
# define SCFG_PEXPMECR 0x144
# define PEXPME(idx) BIT(31 - (idx) * 4)
/* LS1043A PEX LUT debug register */
# define LS_PCIE_LDBG 0x7fc
# define LDBG_SR BIT(30)
# define LDBG_WE BIT(31)
2017-08-28 18:52:58 +08:00
# define PCIE_IATU_NUM 6
2023-08-21 14:48:15 -04:00
struct ls_pcie_drvdata {
2023-12-04 11:08:28 -05:00
const u32 pf_lut_off ;
2023-12-04 11:08:27 -05:00
const struct dw_pcie_host_ops * ops ;
2023-12-04 11:08:26 -05:00
int ( * exit_from_l2 ) ( struct dw_pcie_rp * pp ) ;
2023-12-04 11:08:27 -05:00
bool scfg_support ;
2023-08-21 14:48:15 -04:00
bool pm_support ;
} ;
2014-11-05 16:45:11 +08:00
struct ls_pcie {
2017-02-15 18:48:14 +05:30
struct dw_pcie * pci ;
2023-08-21 14:48:15 -04:00
const struct ls_pcie_drvdata * drvdata ;
2023-12-04 11:08:28 -05:00
void __iomem * pf_lut_base ;
2023-12-04 11:08:27 -05:00
struct regmap * scfg ;
int index ;
2023-08-21 14:48:15 -04:00
bool big_endian ;
2014-11-05 16:45:11 +08:00
} ;
2023-12-04 11:08:28 -05:00
# define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr)
2017-02-15 18:48:14 +05:30
# define to_ls_pcie(x) dev_get_drvdata((x)->dev)
2014-11-05 16:45:11 +08:00
2015-10-16 15:19:16 +08:00
static bool ls_pcie_is_bridge ( struct ls_pcie * pcie )
{
2017-02-15 18:48:14 +05:30
struct dw_pcie * pci = pcie - > pci ;
2015-10-16 15:19:16 +08:00
u32 header_type ;
2017-02-15 18:48:14 +05:30
header_type = ioread8 ( pci - > dbi_base + PCI_HEADER_TYPE ) ;
2023-10-03 15:53:00 +03:00
header_type & = PCI_HEADER_TYPE_MASK ;
2015-10-16 15:19:16 +08:00
return header_type = = PCI_HEADER_TYPE_BRIDGE ;
}
2015-10-16 15:19:19 +08:00
/* Clear multi-function bit */
static void ls_pcie_clear_multifunction ( struct ls_pcie * pcie )
{
2017-02-15 18:48:14 +05:30
struct dw_pcie * pci = pcie - > pci ;
iowrite8 ( PCI_HEADER_TYPE_BRIDGE , pci - > dbi_base + PCI_HEADER_TYPE ) ;
2015-10-16 15:19:19 +08:00
}
2016-02-29 17:24:15 -06:00
/* Drop MSG TLP except for Vendor MSG */
static void ls_pcie_drop_msg_tlp ( struct ls_pcie * pcie )
{
u32 val ;
2017-02-15 18:48:14 +05:30
struct dw_pcie * pci = pcie - > pci ;
2016-02-29 17:24:15 -06:00
2017-02-15 18:48:14 +05:30
val = ioread32 ( pci - > dbi_base + PCIE_STRFMR1 ) ;
2016-02-29 17:24:15 -06:00
val & = 0xDFFFFFFF ;
2017-02-15 18:48:14 +05:30
iowrite32 ( val , pci - > dbi_base + PCIE_STRFMR1 ) ;
2016-02-29 17:24:15 -06:00
}
2017-10-12 17:44:48 +08:00
/* Forward error response of outbound non-posted requests */
static void ls_pcie_fix_error_response ( struct ls_pcie * pcie )
{
struct dw_pcie * pci = pcie - > pci ;
iowrite32 ( PCIE_ABSERR_SETTING , pci - > dbi_base + PCIE_ABSERR ) ;
}
2023-12-04 11:08:28 -05:00
static u32 ls_pcie_pf_lut_readl ( struct ls_pcie * pcie , u32 off )
2023-08-21 14:48:15 -04:00
{
if ( pcie - > big_endian )
2023-12-04 11:08:28 -05:00
return ioread32be ( pcie - > pf_lut_base + off ) ;
2023-08-21 14:48:15 -04:00
2023-12-04 11:08:28 -05:00
return ioread32 ( pcie - > pf_lut_base + off ) ;
2023-08-21 14:48:15 -04:00
}
2023-12-04 11:08:28 -05:00
static void ls_pcie_pf_lut_writel ( struct ls_pcie * pcie , u32 off , u32 val )
2023-08-21 14:48:15 -04:00
{
if ( pcie - > big_endian )
2023-12-04 11:08:28 -05:00
iowrite32be ( val , pcie - > pf_lut_base + off ) ;
2023-08-21 14:48:15 -04:00
else
2023-12-04 11:08:28 -05:00
iowrite32 ( val , pcie - > pf_lut_base + off ) ;
2023-08-21 14:48:15 -04:00
}
static void ls_pcie_send_turnoff_msg ( struct dw_pcie_rp * pp )
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
u32 val ;
int ret ;
2023-12-04 11:08:28 -05:00
val = ls_pcie_pf_lut_readl ( pcie , LS_PCIE_PF_MCR ) ;
2023-08-21 14:48:15 -04:00
val | = PF_MCR_PTOMR ;
2023-12-04 11:08:28 -05:00
ls_pcie_pf_lut_writel ( pcie , LS_PCIE_PF_MCR , val ) ;
2023-08-21 14:48:15 -04:00
2023-12-04 11:08:28 -05:00
ret = readx_poll_timeout ( ls_pcie_pf_lut_readl_addr , LS_PCIE_PF_MCR ,
2023-08-21 14:48:15 -04:00
val , ! ( val & PF_MCR_PTOMR ) ,
PCIE_PME_TO_L2_TIMEOUT_US / 10 ,
PCIE_PME_TO_L2_TIMEOUT_US ) ;
if ( ret )
dev_err ( pcie - > pci - > dev , " PME_Turn_off timeout \n " ) ;
}
2023-12-04 11:08:26 -05:00
static int ls_pcie_exit_from_l2 ( struct dw_pcie_rp * pp )
2023-08-21 14:48:15 -04:00
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
u32 val ;
int ret ;
/*
* Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
* to exit L2 state .
*/
2023-12-04 11:08:28 -05:00
val = ls_pcie_pf_lut_readl ( pcie , LS_PCIE_PF_MCR ) ;
2023-08-21 14:48:15 -04:00
val | = PF_MCR_EXL2S ;
2023-12-04 11:08:28 -05:00
ls_pcie_pf_lut_writel ( pcie , LS_PCIE_PF_MCR , val ) ;
2023-08-21 14:48:15 -04:00
/*
* L2 exit timeout of 10 ms is not defined in the specifications ,
* it was chosen based on empirical observations .
*/
2023-12-04 11:08:28 -05:00
ret = readx_poll_timeout ( ls_pcie_pf_lut_readl_addr , LS_PCIE_PF_MCR ,
2023-08-21 14:48:15 -04:00
val , ! ( val & PF_MCR_EXL2S ) ,
1000 ,
10000 ) ;
if ( ret )
dev_err ( pcie - > pci - > dev , " L2 exit timeout \n " ) ;
2023-12-04 11:08:26 -05:00
return ret ;
2023-08-21 14:48:15 -04:00
}
2022-06-24 17:34:25 +03:00
static int ls_pcie_host_init ( struct dw_pcie_rp * pp )
2017-08-28 18:52:56 +08:00
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
2017-10-12 17:44:48 +08:00
ls_pcie_fix_error_response ( pcie ) ;
2017-08-28 18:52:58 +08:00
2017-08-28 18:52:59 +08:00
dw_pcie_dbi_ro_wr_en ( pci ) ;
2017-08-28 18:52:56 +08:00
ls_pcie_clear_multifunction ( pcie ) ;
2017-08-28 18:52:59 +08:00
dw_pcie_dbi_ro_wr_dis ( pci ) ;
2017-08-28 18:52:56 +08:00
ls_pcie_drop_msg_tlp ( pcie ) ;
return 0 ;
}
2023-12-04 11:08:27 -05:00
static void scfg_pcie_send_turnoff_msg ( struct regmap * scfg , u32 reg , u32 mask )
{
/* Send PME_Turn_Off message */
regmap_write_bits ( scfg , reg , mask , mask ) ;
/*
* There is no specific register to check for PME_To_Ack from endpoint .
* So on the safe side , wait for PCIE_PME_TO_L2_TIMEOUT_US .
*/
mdelay ( PCIE_PME_TO_L2_TIMEOUT_US / 1000 ) ;
/*
* Layerscape hardware reference manual recommends clearing the PMXMTTURNOFF bit
* to complete the PME_Turn_Off handshake .
*/
regmap_write_bits ( scfg , reg , mask , 0 ) ;
}
static void ls1021a_pcie_send_turnoff_msg ( struct dw_pcie_rp * pp )
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
scfg_pcie_send_turnoff_msg ( pcie - > scfg , SCFG_PEXPMWRCR ( pcie - > index ) , PMXMTTURNOFF ) ;
}
static int scfg_pcie_exit_from_l2 ( struct regmap * scfg , u32 reg , u32 mask )
{
/* Reset the PEX wrapper to bring the link out of L2 */
regmap_write_bits ( scfg , reg , mask , mask ) ;
regmap_write_bits ( scfg , reg , mask , 0 ) ;
return 0 ;
}
static int ls1021a_pcie_exit_from_l2 ( struct dw_pcie_rp * pp )
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
return scfg_pcie_exit_from_l2 ( pcie - > scfg , SCFG_PEXSFTRSTCR , PEXSR ( pcie - > index ) ) ;
}
2023-12-04 11:08:29 -05:00
static void ls1043a_pcie_send_turnoff_msg ( struct dw_pcie_rp * pp )
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
scfg_pcie_send_turnoff_msg ( pcie - > scfg , SCFG_PEXPMECR , PEXPME ( pcie - > index ) ) ;
}
static int ls1043a_pcie_exit_from_l2 ( struct dw_pcie_rp * pp )
{
struct dw_pcie * pci = to_dw_pcie_from_pp ( pp ) ;
struct ls_pcie * pcie = to_ls_pcie ( pci ) ;
u32 val ;
/*
* Reset the PEX wrapper to bring the link out of L2 .
* LDBG_WE : allows the user to have write access to the PEXDBG [ SR ] for both setting and
* clearing the soft reset on the PEX module .
* LDBG_SR : When SR is set to 1 , the PEX module enters soft reset .
*/
val = ls_pcie_pf_lut_readl ( pcie , LS_PCIE_LDBG ) ;
val | = LDBG_WE ;
ls_pcie_pf_lut_writel ( pcie , LS_PCIE_LDBG , val ) ;
val = ls_pcie_pf_lut_readl ( pcie , LS_PCIE_LDBG ) ;
val | = LDBG_SR ;
ls_pcie_pf_lut_writel ( pcie , LS_PCIE_LDBG , val ) ;
val = ls_pcie_pf_lut_readl ( pcie , LS_PCIE_LDBG ) ;
val & = ~ LDBG_SR ;
ls_pcie_pf_lut_writel ( pcie , LS_PCIE_LDBG , val ) ;
val = ls_pcie_pf_lut_readl ( pcie , LS_PCIE_LDBG ) ;
val & = ~ LDBG_WE ;
ls_pcie_pf_lut_writel ( pcie , LS_PCIE_LDBG , val ) ;
return 0 ;
}
2017-06-05 16:53:46 +08:00
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
2023-12-20 14:38:24 +09:00
. init = ls_pcie_host_init ,
2023-08-21 14:48:15 -04:00
. pme_turn_off = ls_pcie_send_turnoff_msg ,
} ;
2023-12-04 11:08:27 -05:00
static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
2024-01-15 12:10:38 -06:00
. init = ls_pcie_host_init ,
2023-12-04 11:08:27 -05:00
. pme_turn_off = ls1021a_pcie_send_turnoff_msg ,
} ;
2023-08-21 14:48:15 -04:00
static const struct ls_pcie_drvdata ls1021a_drvdata = {
2023-12-04 11:08:27 -05:00
. pm_support = true ,
. scfg_support = true ,
. ops = & ls1021a_pcie_host_ops ,
. exit_from_l2 = ls1021a_pcie_exit_from_l2 ,
2023-08-21 14:48:15 -04:00
} ;
2023-12-04 11:08:29 -05:00
static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
2024-01-15 12:10:38 -06:00
. init = ls_pcie_host_init ,
2023-12-04 11:08:29 -05:00
. pme_turn_off = ls1043a_pcie_send_turnoff_msg ,
} ;
static const struct ls_pcie_drvdata ls1043a_drvdata = {
. pf_lut_off = 0x10000 ,
. pm_support = true ,
. scfg_support = true ,
. ops = & ls1043a_pcie_host_ops ,
. exit_from_l2 = ls1043a_pcie_exit_from_l2 ,
2023-08-21 14:48:15 -04:00
} ;
static const struct ls_pcie_drvdata layerscape_drvdata = {
2023-12-04 11:08:28 -05:00
. pf_lut_off = 0xc0000 ,
2023-08-21 14:48:15 -04:00
. pm_support = true ,
2023-12-04 11:08:27 -05:00
. ops = & ls_pcie_host_ops ,
2023-12-04 11:08:26 -05:00
. exit_from_l2 = ls_pcie_exit_from_l2 ,
2015-10-16 15:19:19 +08:00
} ;
2015-10-16 15:19:17 +08:00
static const struct of_device_id ls_pcie_of_match [ ] = {
2023-08-21 14:48:15 -04:00
{ . compatible = " fsl,ls1012a-pcie " , . data = & layerscape_drvdata } ,
{ . compatible = " fsl,ls1021a-pcie " , . data = & ls1021a_drvdata } ,
{ . compatible = " fsl,ls1028a-pcie " , . data = & layerscape_drvdata } ,
2023-12-04 11:08:29 -05:00
{ . compatible = " fsl,ls1043a-pcie " , . data = & ls1043a_drvdata } ,
2023-08-21 14:48:15 -04:00
{ . compatible = " fsl,ls1046a-pcie " , . data = & layerscape_drvdata } ,
{ . compatible = " fsl,ls2080a-pcie " , . data = & layerscape_drvdata } ,
{ . compatible = " fsl,ls2085a-pcie " , . data = & layerscape_drvdata } ,
{ . compatible = " fsl,ls2088a-pcie " , . data = & layerscape_drvdata } ,
{ . compatible = " fsl,ls1088a-pcie " , . data = & layerscape_drvdata } ,
2015-10-16 15:19:17 +08:00
{ } ,
} ;
2021-01-20 11:52:46 +01:00
static int ls_pcie_probe ( struct platform_device * pdev )
2014-11-05 16:45:11 +08:00
{
2016-10-06 13:38:05 -05:00
struct device * dev = & pdev - > dev ;
2017-02-15 18:48:14 +05:30
struct dw_pcie * pci ;
2014-11-05 16:45:11 +08:00
struct ls_pcie * pcie ;
struct resource * dbi_base ;
2023-12-04 11:08:27 -05:00
u32 index [ 2 ] ;
int ret ;
2014-11-05 16:45:11 +08:00
2016-10-06 13:38:05 -05:00
pcie = devm_kzalloc ( dev , sizeof ( * pcie ) , GFP_KERNEL ) ;
2014-11-05 16:45:11 +08:00
if ( ! pcie )
return - ENOMEM ;
2017-02-15 18:48:14 +05:30
pci = devm_kzalloc ( dev , sizeof ( * pci ) , GFP_KERNEL ) ;
if ( ! pci )
return - ENOMEM ;
2023-08-21 14:48:15 -04:00
pcie - > drvdata = of_device_get_match_data ( dev ) ;
2017-02-15 18:48:14 +05:30
pci - > dev = dev ;
2017-02-25 02:08:12 -08:00
pcie - > pci = pci ;
2023-12-04 11:08:27 -05:00
pci - > pp . ops = pcie - > drvdata - > ops ;
2017-02-25 02:08:12 -08:00
2014-11-05 16:45:11 +08:00
dbi_base = platform_get_resource_byname ( pdev , IORESOURCE_MEM , " regs " ) ;
2017-04-19 17:49:08 +01:00
pci - > dbi_base = devm_pci_remap_cfg_resource ( dev , dbi_base ) ;
2017-02-15 18:48:14 +05:30
if ( IS_ERR ( pci - > dbi_base ) )
return PTR_ERR ( pci - > dbi_base ) ;
2014-11-05 16:45:11 +08:00
2023-08-21 14:48:15 -04:00
pcie - > big_endian = of_property_read_bool ( dev - > of_node , " big-endian " ) ;
2023-12-04 11:08:28 -05:00
pcie - > pf_lut_base = pci - > dbi_base + pcie - > drvdata - > pf_lut_off ;
2023-08-21 14:48:15 -04:00
2023-12-04 11:08:27 -05:00
if ( pcie - > drvdata - > scfg_support ) {
pcie - > scfg = syscon_regmap_lookup_by_phandle ( dev - > of_node , " fsl,pcie-scfg " ) ;
if ( IS_ERR ( pcie - > scfg ) ) {
dev_err ( dev , " No syscfg phandle specified \n " ) ;
return PTR_ERR ( pcie - > scfg ) ;
}
ret = of_property_read_u32_array ( dev - > of_node , " fsl,pcie-scfg " , index , 2 ) ;
if ( ret )
return ret ;
pcie - > index = index [ 1 ] ;
}
2023-08-21 14:48:15 -04:00
2015-10-16 15:19:16 +08:00
if ( ! ls_pcie_is_bridge ( pcie ) )
return - ENODEV ;
2017-02-15 18:48:11 +05:30
platform_set_drvdata ( pdev , pcie ) ;
2020-11-05 15:11:56 -06:00
return dw_pcie_host_init ( & pci - > pp ) ;
2014-11-05 16:45:11 +08:00
}
2023-08-21 14:48:15 -04:00
static int ls_pcie_suspend_noirq ( struct device * dev )
{
struct ls_pcie * pcie = dev_get_drvdata ( dev ) ;
if ( ! pcie - > drvdata - > pm_support )
return 0 ;
return dw_pcie_suspend_noirq ( pcie - > pci ) ;
}
static int ls_pcie_resume_noirq ( struct device * dev )
{
struct ls_pcie * pcie = dev_get_drvdata ( dev ) ;
2023-12-04 11:08:26 -05:00
int ret ;
2023-08-21 14:48:15 -04:00
if ( ! pcie - > drvdata - > pm_support )
return 0 ;
2023-12-04 11:08:26 -05:00
ret = pcie - > drvdata - > exit_from_l2 ( & pcie - > pci - > pp ) ;
if ( ret )
return ret ;
2023-08-21 14:48:15 -04:00
return dw_pcie_resume_noirq ( pcie - > pci ) ;
}
static const struct dev_pm_ops ls_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS ( ls_pcie_suspend_noirq , ls_pcie_resume_noirq )
} ;
2014-11-05 16:45:11 +08:00
static struct platform_driver ls_pcie_driver = {
2021-01-20 11:52:46 +01:00
. probe = ls_pcie_probe ,
2014-11-05 16:45:11 +08:00
. driver = {
. name = " layerscape-pcie " ,
. of_match_table = ls_pcie_of_match ,
2017-04-20 15:36:25 -05:00
. suppress_bind_attrs = true ,
2023-08-21 14:48:15 -04:00
. pm = & ls_pcie_pm_ops ,
2014-11-05 16:45:11 +08:00
} ,
} ;
2021-01-20 11:52:46 +01:00
builtin_platform_driver ( ls_pcie_driver ) ;