linux/drivers/pci/controller/dwc/pcie-designware.c
Serge Semin ef69f852a9 PCI: dwc: Introduce generic platform clocks and resets
Currently almost every platform driver uses its own reset and clock
naming to get the corresponding descriptors. That makes the code harder
to maintain and comprehend, especially since the set of main DWC PCIe
core reset and clock signals hasn't changed much for at least one major
IP-core release. So in order to organize things around these signals,
we suggest creating a generic interface for them in accordance with the
naming introduced in the DWC PCIe IP-core reference manual:

Application clocks:
- "dbi"  - data bus interface clock (on some DWC PCIe platforms it's
           referred as "pclk", "pcie", "sys", "ahb", "cfg", "iface",
           "gio", "reg", "pcie_apb_sys");
- "mstr" - AXI-bus master interface clock (some DWC PCIe glue drivers
           refer to this clock as "port", "bus", "pcie_bus",
           "bus_master/master_bus/axi_m", "pcie_aclk");
- "slv"  - AXI-bus slave interface clock (also called as "port", "bus",
           "pcie_bus", "bus_slave/slave_bus/axi_s", "pcie_aclk",
           "pcie_inbound_axi").

Core clocks:
- "pipe" - core-PCS PIPE interface clock coming from external PHY (it's
           normally named by the platform drivers as just "pipe");
- "core" - primary clock of the controller (none of the platform drivers
           declare such a clock but in accordance with the ref. manual
           the devices may have it separately specified);
- "aux"  - auxiliary PMC domain clock (it is named by some platforms as
           "pcie_aux" and just "aux");
- "ref"  - Generic reference clock (it is a generic clock source, which
           can be used as a signal source for multiple interfaces, some
           platforms call it as "ref", "general", "pcie_phy",
           "pcie_phy_ref").

Application resets:
- "dbi"  - Data-bus interface reset (it's CSR interface clock and is
           normally called as "apb" though technically it's not APB but
           DWC PCIe-specific interface);
- "mstr" - AXI-bus master reset (some platforms call it as "port", "apps",
           "bus", "axi_m");
- "slv"  - ABI-bus slave reset (some platforms call it as "port", "apps",
           "bus", "axi_s").

Core resets:
- "non-sticky" - non-sticky CSR flags reset;
- "sticky"     - sticky CSR flags reset;
- "pipe"       - PIPE-interface (Core-PCS) logic reset (some platforms
                 call it just "pipe");
- "core"       - controller primary reset (resets everything except PMC
                 module, some platforms refer to this signal as "soft",
                 "pci");
- "phy"        - PCS/PHY block reset (strictly speaking it is normally
                 connected to the input of an external block, but the
                 reference manual says it must be available for the PMC
                 working correctly, some existing platforms call it
                 "pciephy", "phy", "link");
- "hot"        - PMC hot reset signal (also called as "sleep");
- "pwr"        - cold reset signal (can be referred as "pwr", "turnoff").

Bus reset:
- "perst" - PCIe standard signal used to reset the PCIe peripheral
            devices.

As you can see, each platform uses its own naming for basically the
same set of signals. Within the framework of this commit we add a set
of clock and reset signal resources, together with the corresponding
names and identifiers for each entity denoted above. At the current
stage the platforms will be able to use the provided infrastructure to
automatically request all of these resources and manipulate them in the
Host/EP init callbacks. Alas, it isn't that easy to create a common
cold/hot reset procedure since there are too many platform specifics
involved, like the exposure of external flags and the delay
requirements.

Link: https://lore.kernel.org/r/20221113191301.5526-20-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
2022-11-23 16:01:55 +01:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include "../../pci.h"
#include "pcie-designware.h"
static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
        [DW_PCIE_DBI_CLK] = "dbi",
        [DW_PCIE_MSTR_CLK] = "mstr",
        [DW_PCIE_SLV_CLK] = "slv",
};

static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
        [DW_PCIE_PIPE_CLK] = "pipe",
        [DW_PCIE_CORE_CLK] = "core",
        [DW_PCIE_AUX_CLK] = "aux",
        [DW_PCIE_REF_CLK] = "ref",
};

static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
        [DW_PCIE_DBI_RST] = "dbi",
        [DW_PCIE_MSTR_RST] = "mstr",
        [DW_PCIE_SLV_RST] = "slv",
};

static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
        [DW_PCIE_NON_STICKY_RST] = "non-sticky",
        [DW_PCIE_STICKY_RST] = "sticky",
        [DW_PCIE_CORE_RST] = "core",
        [DW_PCIE_PIPE_RST] = "pipe",
        [DW_PCIE_PHY_RST] = "phy",
        [DW_PCIE_HOT_RST] = "hot",
        [DW_PCIE_PWR_RST] = "pwr",
};
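
/*
 * Request the application and core clocks as optional bulks: missing clocks
 * come back as NULL descriptors, for which the clk API calls are no-ops.
 */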
static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
        int i, ret;

        for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
                pci->app_clks[i].id = dw_pcie_app_clks[i];

        for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
                pci->core_clks[i].id = dw_pcie_core_clks[i];

        ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
                                         pci->app_clks);
        if (ret)
                return ret;

        return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
                                          pci->core_clks);
}
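
/*
 * The application resets are requested as shared since they may be wired to
 * several consumers, while the core resets are exclusive to the controller.
 * The PERST# GPIO is requested driven to its active state, so the downstream
 * devices stay in reset until the platform driver deasserts it.
 */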
static int dw_pcie_get_resets(struct dw_pcie *pci)
{
        int i, ret;

        for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
                pci->app_rsts[i].id = dw_pcie_app_rsts[i];

        for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
                pci->core_rsts[i].id = dw_pcie_core_rsts[i];

        ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
                                                          DW_PCIE_NUM_APP_RSTS,
                                                          pci->app_rsts);
        if (ret)
                return ret;

        ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
                                                             DW_PCIE_NUM_CORE_RSTS,
                                                             pci->core_rsts);
        if (ret)
                return ret;

        pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(pci->pe_rst))
                return PTR_ERR(pci->pe_rst);

        return 0;
}
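
/*
 * Map the mandatory "dbi" space and the optional "dbi2" and "atu" spaces,
 * then pick up the generic clocks and resets if the platform driver opted
 * in via the REQ_RES capability flag.
 */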
int dw_pcie_get_resources(struct dw_pcie *pci)
{
        struct platform_device *pdev = to_platform_device(pci->dev);
        struct device_node *np = dev_of_node(pci->dev);
        struct resource *res;
        int ret;

        if (!pci->dbi_base) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
                pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
                if (IS_ERR(pci->dbi_base))
                        return PTR_ERR(pci->dbi_base);
        }

        /* DBI2 is mainly useful for the endpoint controller */
        if (!pci->dbi_base2) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
                if (res) {
                        pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
                        if (IS_ERR(pci->dbi_base2))
                                return PTR_ERR(pci->dbi_base2);
                } else {
                        pci->dbi_base2 = pci->dbi_base + SZ_4K;
                }
        }

        /* For non-unrolled iATU/eDMA platforms this range will be ignored */
        if (!pci->atu_base) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
                if (res) {
                        pci->atu_size = resource_size(res);
                        pci->atu_base = devm_ioremap_resource(pci->dev, res);
                        if (IS_ERR(pci->atu_base))
                                return PTR_ERR(pci->atu_base);
                } else {
                        pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
                }
        }

        /* Set a default value suitable for at most 8 in and 8 out windows */
        if (!pci->atu_size)
                pci->atu_size = SZ_4K;

        /* LLDD is supposed to manually switch the clocks and resets state */
        if (dw_pcie_cap_is(pci, REQ_RES)) {
                ret = dw_pcie_get_clocks(pci);
                if (ret)
                        return ret;

                ret = dw_pcie_get_resets(pci);
                if (ret)
                        return ret;
        }

        if (pci->link_gen < 1)
                pci->link_gen = of_pci_get_max_link_speed(np);

        of_property_read_u32(np, "num-lanes", &pci->num_lanes);

        if (of_property_read_bool(np, "snps,enable-cdm-check"))
                dw_pcie_cap_set(pci, CDM_CHECK);

        return 0;
}
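
/*
 * Read back the IP-core version and type CSRs and warn if they contradict
 * the values pre-set by the platform driver.
 */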
void dw_pcie_version_detect(struct dw_pcie *pci)
{
        u32 ver;

        /* The content of the CSR is zero on DWC PCIe older than v4.70a */
        ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
        if (!ver)
                return;

        if (pci->version && pci->version != ver)
                dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
                         pci->version, ver);
        else
                pci->version = ver;

        ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
        if (pci->type && pci->type != ver)
                dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
                         pci->type, ver);
        else
                pci->type = ver;
}

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
                                  u8 cap)
{
        u8 cap_id, next_cap_ptr;
        u16 reg;

        if (!cap_ptr)
                return 0;

        reg = dw_pcie_readw_dbi(pci, cap_ptr);
        cap_id = (reg & 0x00ff);

        if (cap_id > PCI_CAP_ID_MAX)
                return 0;

        if (cap_id == cap)
                return cap_ptr;

        next_cap_ptr = (reg & 0xff00) >> 8;
        return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
        u8 next_cap_ptr;
        u16 reg;

        reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
        next_cap_ptr = (reg & 0x00ff);

        return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
                                            u8 cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (start)
                pos = start;

        header = dw_pcie_readl_dbi(pci, pos);
        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                header = dw_pcie_readl_dbi(pci, pos);
        }

        return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
        return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
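
/*
 * For example, the helpers above can locate the PCIe capability via
 * dw_pcie_find_capability(pci, PCI_CAP_ID_EXP) (as dw_pcie_wait_for_link()
 * does below), or an extended capability such as AER via
 * dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_ERR).
 */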

int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
        if (!IS_ALIGNED((uintptr_t)addr, size)) {
                *val = 0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        if (size == 4) {
                *val = readl(addr);
        } else if (size == 2) {
                *val = readw(addr);
        } else if (size == 1) {
                *val = readb(addr);
        } else {
                *val = 0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
        if (!IS_ALIGNED((uintptr_t)addr, size))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (size == 4)
                writel(val, addr);
        else if (size == 2)
                writew(val, addr);
        else if (size == 1)
                writeb(val, addr);
        else
                return PCIBIOS_BAD_REGISTER_NUMBER;

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
        int ret;
        u32 val;

        if (pci->ops && pci->ops->read_dbi)
                return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

        ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
        if (ret)
                dev_err(pci->dev, "Read DBI address failed\n");

        return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops && pci->ops->write_dbi) {
                pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->dbi_base + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops && pci->ops->write_dbi2) {
                pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI2 address failed\n");
}

static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
                                               u32 index)
{
        if (dw_pcie_cap_is(pci, IATU_UNROLL))
                return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
        return pci->atu_base;
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
{
        void __iomem *base;
        int ret;
        u32 val;

        base = dw_pcie_select_atu(pci, dir, index);

        if (pci->ops && pci->ops->read_dbi)
                return pci->ops->read_dbi(pci, base, reg, 4);

        ret = dw_pcie_read(base + reg, 4, &val);
        if (ret)
                dev_err(pci->dev, "Read ATU address failed\n");

        return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
                               u32 reg, u32 val)
{
        void __iomem *base;
        int ret;

        base = dw_pcie_select_atu(pci, dir, index);

        if (pci->ops && pci->ops->write_dbi) {
                pci->ops->write_dbi(pci, base, reg, 4, val);
                return;
        }

        ret = dw_pcie_write(base + reg, 4, val);
        if (ret)
                dev_err(pci->dev, "Write ATU address failed\n");
}

static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
        return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}

static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
                                         u32 val)
{
        dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}

static inline u32 dw_pcie_enable_ecrc(u32 val)
{
        /*
         * DesignWare core version 4.90A has a design issue where the 'TD'
         * bit in the Control register-1 of the ATU outbound region acts
         * like an override for the ECRC setting, i.e., the presence of TLP
         * Digest (ECRC) in the outgoing TLPs is solely determined by this
         * bit. This is contrary to the PCIe spec which says that the
         * enablement of the ECRC is solely determined by the AER
         * registers.
         *
         * Because of this, even when the ECRC is enabled through AER
         * registers, the transactions going through ATU won't have TLP
         * Digest as there is no way the PCI core AER code could program
         * the TD bit which is specific to the DesignWare core.
         *
         * The best way to handle this scenario is to program the TD bit
         * always. It affects only the traffic from root port to downstream
         * devices.
         *
         * At this point,
         * When ECRC is enabled in AER registers, everything works normally
         * When ECRC is NOT enabled in AER registers, then,
         * on Root Port:- TLP Digest (DWord size) gets appended to each packet
         *                even though it is not required. Since downstream
         *                TLPs are mostly for configuration accesses and BAR
         *                accesses, they are not in critical path and won't
         *                have much negative effect on the performance.
         * on End Point:- TLP Digest is received for some/all the packets coming
         *                from the root port. TLP Digest is ignored because,
         *                as per the PCIe Spec r5.0 v1.0 section 2.2.3
         *                "TLP Digest Rules", when an endpoint receives TLP
         *                Digest when its ECRC check functionality is disabled
         *                in AER registers, received TLP Digest is just ignored.
         * Since there is no issue or error reported either side, best way to
         * handle the scenario is to program TD bit by default.
         */
        return val | PCIE_ATU_TD;
}
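
/*
 * Program an outbound iATU region: set the base/limit/target addresses and
 * the TLP type in CTRL1, enable the region via CTRL2, then poll until the
 * enable bit reads back as set.
 */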
static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
                                       int index, int type, u64 cpu_addr,
                                       u64 pci_addr, u64 size)
{
        u32 retries, val;
        u64 limit_addr;

        if (pci->ops && pci->ops->cpu_addr_fixup)
                cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

        limit_addr = cpu_addr + size - 1;

        if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
            !IS_ALIGNED(cpu_addr, pci->region_align) ||
            !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
                return -EINVAL;
        }

        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
                              lower_32_bits(cpu_addr));
        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
                              upper_32_bits(cpu_addr));

        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
                              lower_32_bits(limit_addr));
        if (dw_pcie_ver_is_ge(pci, 460A))
                dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
                                      upper_32_bits(limit_addr));

        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
                              lower_32_bits(pci_addr));
        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
                              upper_32_bits(pci_addr));

        val = type | PCIE_ATU_FUNC_NUM(func_no);
        if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
            dw_pcie_ver_is_ge(pci, 460A))
                val |= PCIE_ATU_INCREASE_REGION_SIZE;
        if (dw_pcie_ver_is(pci, 490A))
                val = dw_pcie_enable_ecrc(val);
        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);

        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }

        dev_err(pci->dev, "Outbound iATU is not being enabled\n");

        return -ETIMEDOUT;
}

int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                              u64 cpu_addr, u64 pci_addr, u64 size)
{
        return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
                                           cpu_addr, pci_addr, size);
}

int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
                                 int type, u64 cpu_addr, u64 pci_addr,
                                 u64 size)
{
        return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
                                           cpu_addr, pci_addr, size);
}

static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
        return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}

static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
                                         u32 val)
{
        dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
                             u64 cpu_addr, u64 pci_addr, u64 size)
{
        u64 limit_addr = pci_addr + size - 1;
        u32 retries, val;

        if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
            !IS_ALIGNED(cpu_addr, pci->region_align) ||
            !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
                return -EINVAL;
        }

        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
                              lower_32_bits(pci_addr));
        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
                              upper_32_bits(pci_addr));

        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
                              lower_32_bits(limit_addr));
        if (dw_pcie_ver_is_ge(pci, 460A))
                dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
                                      upper_32_bits(limit_addr));

        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
                              lower_32_bits(cpu_addr));
        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
                              upper_32_bits(cpu_addr));

        val = type;
        if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
            dw_pcie_ver_is_ge(pci, 460A))
                val |= PCIE_ATU_INCREASE_REGION_SIZE;
        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }

        dev_err(pci->dev, "Inbound iATU is not being enabled\n");

        return -ETIMEDOUT;
}

int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
                                int type, u64 cpu_addr, u8 bar)
{
        u32 retries, val;

        if (!IS_ALIGNED(cpu_addr, pci->region_align))
                return -EINVAL;

        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
                              lower_32_bits(cpu_addr));
        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
                              upper_32_bits(cpu_addr));

        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
                              PCIE_ATU_FUNC_NUM(func_no));
        dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
                              PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
                              PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }

        dev_err(pci->dev, "Inbound iATU is not being enabled\n");

        return -ETIMEDOUT;
}

void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
        dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
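
/*
 * Poll for the link to come up via dw_pcie_link_up(), then report the
 * negotiated link speed and width.
 */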
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
        u32 offset, val;
        int retries;

        /* Check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (dw_pcie_link_up(pci))
                        break;

                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
        }

        if (retries >= LINK_WAIT_MAX_RETRIES) {
                dev_info(pci->dev, "Phy link never came up\n");
                return -ETIMEDOUT;
        }

        offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
        val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);

        dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
                 FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
                 FIELD_GET(PCI_EXP_LNKSTA_NLW, val));

        return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

int dw_pcie_link_up(struct dw_pcie *pci)
{
        u32 val;

        if (pci->ops && pci->ops->link_up)
                return pci->ops->link_up(pci);

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
        return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
                (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
EXPORT_SYMBOL_GPL(dw_pcie_link_up);

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
        val |= PORT_MLTI_UPCFG_SUPPORT;
        dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
        u32 cap, ctrl2, link_speed;
        u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

        cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
        ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
        ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

        switch (pcie_link_speed[link_gen]) {
        case PCIE_SPEED_2_5GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
                break;
        case PCIE_SPEED_5_0GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
                break;
        case PCIE_SPEED_8_0GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
                break;
        case PCIE_SPEED_16_0GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
                break;
        default:
                /* Use hardware capability */
                link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
                ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
                break;
        }

        dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

        cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
        dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}

void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
        int max_region, ob, ib;
        u32 val, min, dir;
        u64 max;

        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
        if (val == 0xFFFFFFFF) {
                dw_pcie_cap_set(pci, IATU_UNROLL);

                max_region = min((int)pci->atu_size / 512, 256);
        } else {
                pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
                pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;

                dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
                max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
        }

        for (ob = 0; ob < max_region; ob++) {
                dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
                val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
                if (val != 0x11110000)
                        break;
        }

        for (ib = 0; ib < max_region; ib++) {
                dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
                val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
                if (val != 0x11110000)
                        break;
        }

        if (ob) {
                dir = PCIE_ATU_REGION_DIR_OB;
        } else if (ib) {
                dir = PCIE_ATU_REGION_DIR_IB;
        } else {
                dev_err(pci->dev, "No iATU regions found\n");
                return;
        }

        dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
        min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);

        if (dw_pcie_ver_is_ge(pci, 460A)) {
                dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
                max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
        } else {
                max = 0;
        }

        pci->num_ob_windows = ob;
        pci->num_ib_windows = ib;
        pci->region_align = 1 << fls(min);
        pci->region_limit = (max << 32) | (SZ_4G - 1);

        dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
                 dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
                 pci->num_ob_windows, pci->num_ib_windows,
                 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
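
/*
 * Apply the generic link settings: target link speed, N_FTS, fast link mode
 * disabled, optional CDM check, and the requested number of lanes.
 */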
void dw_pcie_setup(struct dw_pcie *pci)
{
        u32 val;

        if (pci->link_gen > 0)
                dw_pcie_link_set_max_speed(pci, pci->link_gen);

        /* Configure Gen1 N_FTS */
        if (pci->n_fts[0]) {
                val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
                val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
                val |= PORT_AFR_N_FTS(pci->n_fts[0]);
                val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
                dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
        }

        /* Configure Gen2+ N_FTS */
        if (pci->n_fts[1]) {
                val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
                val &= ~PORT_LOGIC_N_FTS_MASK;
                val |= pci->n_fts[1];
                dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
        }

        /*
         * Enable the CDM check before PCIE_PORT_LINK_CONTROL is read into
         * val: the lane setup below reuses val, so it must not be clobbered
         * with the PCIE_PL_CHK_REG_CONTROL_STATUS contents.
         */
        if (dw_pcie_cap_is(pci, CDM_CHECK)) {
                val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
                val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
                       PCIE_PL_CHK_REG_CHK_REG_START;
                dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
        }

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
        val &= ~PORT_LINK_FAST_LINK_MODE;
        val |= PORT_LINK_DLL_LINK_EN;
        dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

        if (!pci->num_lanes) {
                dev_dbg(pci->dev, "Using h/w default number of lanes\n");
                return;
        }

        /* Set the number of lanes */
        val &= ~PORT_LINK_FAST_LINK_MODE;
        val &= ~PORT_LINK_MODE_MASK;
        switch (pci->num_lanes) {
        case 1:
                val |= PORT_LINK_MODE_1_LANES;
                break;
        case 2:
                val |= PORT_LINK_MODE_2_LANES;
                break;
        case 4:
                val |= PORT_LINK_MODE_4_LANES;
                break;
        case 8:
                val |= PORT_LINK_MODE_8_LANES;
                break;
        default:
                dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
                return;
        }
        dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

        /* Set link width speed control register */
        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
        switch (pci->num_lanes) {
        case 1:
                val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
                break;
        case 2:
                val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
                break;
        case 4:
                val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
                break;
        case 8:
                val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
                break;
        }
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}