Merge branch 'pci/controller/qcom'

- Use devm_clk_bulk_get_all() to get all the clocks from DT to avoid
  writing out all the clock names (Manivannan Sadhasivam); the pattern is
  sketched just before the diff below

- Add DT binding and driver support for the SA8775P SoC (Mrinmay Sarkar)

- Refactor dw_pcie_edma_find_chip() to enable adding support for Hyper DMA
  (HDMA) (Manivannan Sadhasivam)

- Enable drivers to supply the eDMA channel count since some can't
  auto-detect it (Manivannan Sadhasivam); see the glue driver sketch just
  before the diff below

- Add HDMA support for the SA8775P SoC (Mrinmay Sarkar)

- Override the SA8775P NO_SNOOP default to avoid possible memory corruption
  (Mrinmay Sarkar)

- Make sure resources are disabled during PERST# assertion, even if the
  link is already disabled (Manivannan Sadhasivam)

- Vote for the CPU-PCIe ICC (interconnect) path to ensure it stays active
  even if other drivers don't vote for it (Krishna chaitanya chundru); see
  the interconnect sketch just before the diff below

- Add Operating Performance Points (OPP) to scale performance state based
  on aggregate link bandwidth to improve SoC power efficiency (Krishna
  chaitanya chundru)

- Return failure instead of success if dev_pm_opp_find_freq_floor() fails
  (Dan Carpenter)

- Avoid an error pointer dereference if dev_pm_opp_find_freq_exact() fails
  (Dan Carpenter)

- Prevent use of uninitialized data in qcom_pcie_suspend_noirq() (Dan
  Carpenter)

* pci/controller/qcom:
  PCI: qcom: Prevent use of uninitialized data in qcom_pcie_suspend_noirq()
  PCI: qcom: Prevent potential error pointer dereference
  PCI: qcom: Fix missing error code in qcom_pcie_probe()
  PCI: qcom: Add OPP support to scale performance
  PCI: Bring the PCIe speed to MBps logic to new pcie_dev_speed_mbps()
  PCI: qcom: Add ICC bandwidth vote for CPU to PCIe path
  PCI: qcom-ep: Disable resources unconditionally during PERST# assert
  PCI: qcom-ep: Override NO_SNOOP attribute for SA8775P EP
  PCI: qcom: Override NO_SNOOP attribute for SA8775P RC
  PCI: epf-mhi: Enable HDMA for SA8775P SoC
  PCI: qcom-ep: Add HDMA support for SA8775P SoC
  PCI: dwc: Pass the eDMA mapping format flag directly from glue drivers
  PCI: dwc: Skip finding eDMA channels count for HDMA platforms
  PCI: dwc: Refactor dw_pcie_edma_find_chip() API
  PCI: qcom-ep: Add support for SA8775P SOC
  dt-bindings: PCI: qcom-ep: Add support for SA8775P SoC
  PCI: qcom: Use devm_clk_bulk_get_all() API
commit df5dd33728 (Bjorn Helgaas, 2024-07-19 10:10:31 -05:00)
 9 files changed, 376 insertions(+), 188 deletions(-)
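
The devm_clk_bulk_get_all() conversion above boils down to the pattern
below. This is a minimal sketch, not the driver code itself: the foo_pcie
struct and function names are placeholders, and the real conversions are
in the pcie-qcom.c hunks further down.

  #include <linux/clk.h>
  #include <linux/device.h>

  struct foo_pcie {
          struct clk_bulk_data *clks;
          int num_clks;
  };

  static int foo_pcie_enable_clocks(struct device *dev, struct foo_pcie *foo)
  {
          /* Grab every clock listed in the DT node; no per-name table needed */
          foo->num_clks = devm_clk_bulk_get_all(dev, &foo->clks);
          if (foo->num_clks < 0) {
                  dev_err(dev, "Failed to get clocks\n");
                  return foo->num_clks;
          }

          return clk_bulk_prepare_enable(foo->num_clks, foo->clks);
  }

The trade-off is that the driver no longer pins down which clocks it
expects; that check now lives in the DT binding.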
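
With dw_pcie_edma_find_chip() split up as in the pcie-designware.c hunk
below, a glue driver whose DMA block is HDMA with native CSR mapping seeds
the eDMA data itself, since that configuration cannot be auto-detected. A
minimal sketch, mirroring the SA8775P values from the pcie-qcom-ep.c hunk
below (the foo_ names are illustrative):

  #include "pcie-designware.h"

  static void foo_pcie_ep_init_hdma(struct dw_pcie *pci)
  {
          /* Native-CSR HDMA cannot be auto-detected by the DWC core ... */
          pci->edma.mf = EDMA_MF_HDMA_NATIVE;
          /* ... so the glue driver passes the channel counts itself */
          pci->edma.ll_wr_cnt = 8;
          pci->edma.ll_rd_cnt = 8;
  }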
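
The CPU-PCIe vote above follows the usual interconnect API pattern. Below
is a sketch assuming the "cpu-pcie" path name used in the pcie-qcom.c hunk
further down; the foo_ prefix is illustrative and the helper is not part
of the driver.

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/interconnect.h>

  static struct icc_path *foo_pcie_cpu_icc_init(struct device *dev)
  {
          struct icc_path *path;
          int ret;

          path = devm_of_icc_get(dev, "cpu-pcie");
          if (IS_ERR(path))
                  return path;

          /*
           * Register and config/BAR accesses only need the path to stay
           * up, so a token 1 KBps vote keeps it active without inflating
           * the aggregate bandwidth request.
           */
          ret = icc_set_bw(path, 0, kBps_to_icc(1));
          if (ret)
                  return ERR_PTR(ret);

          return path;
  }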

diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml

@ -13,6 +13,7 @@ properties:
compatible:
oneOf:
- enum:
- qcom,sa8775p-pcie-ep
- qcom,sdx55-pcie-ep
- qcom,sm8450-pcie-ep
- items:
@ -20,6 +21,7 @@ properties:
- const: qcom,sdx55-pcie-ep
reg:
minItems: 6
items:
- description: Qualcomm-specific PARF configuration registers
- description: DesignWare PCIe registers
@ -27,8 +29,10 @@ properties:
- description: Address Translation Unit (ATU) registers
- description: Memory region used to map remote RC address space
- description: BAR memory region
- description: DMA register space
reg-names:
minItems: 6
items:
- const: parf
- const: dbi
@ -36,13 +40,14 @@ properties:
- const: atu
- const: addr_space
- const: mmio
- const: dma
clocks:
minItems: 7
minItems: 5
maxItems: 8
clock-names:
minItems: 7
minItems: 5
maxItems: 8
qcom,perst-regs:
@ -57,14 +62,18 @@ properties:
- description: Perst separation enable offset
interrupts:
minItems: 2
items:
- description: PCIe Global interrupt
- description: PCIe Doorbell interrupt
- description: DMA interrupt
interrupt-names:
minItems: 2
items:
- const: global
- const: doorbell
- const: dma
reset-gpios:
description: GPIO used as PERST# input signal
@ -125,6 +134,10 @@ allOf:
- qcom,sdx55-pcie-ep
then:
properties:
reg:
maxItems: 6
reg-names:
maxItems: 6
clocks:
items:
- description: PCIe Auxiliary clock
@ -143,6 +156,10 @@ allOf:
- const: slave_q2a
- const: sleep
- const: ref
interrupts:
maxItems: 2
interrupt-names:
maxItems: 2
- if:
properties:
@ -152,6 +169,10 @@ allOf:
- qcom,sm8450-pcie-ep
then:
properties:
reg:
maxItems: 6
reg-names:
maxItems: 6
clocks:
items:
- description: PCIe Auxiliary clock
@ -172,6 +193,45 @@ allOf:
- const: ref
- const: ddrss_sf_tbu
- const: aggre_noc_axi
interrupts:
maxItems: 2
interrupt-names:
maxItems: 2
- if:
properties:
compatible:
contains:
enum:
- qcom,sa8775p-pcie-ep
then:
properties:
reg:
minItems: 7
maxItems: 7
reg-names:
minItems: 7
maxItems: 7
clocks:
items:
- description: PCIe Auxiliary clock
- description: PCIe CFG AHB clock
- description: PCIe Master AXI clock
- description: PCIe Slave AXI clock
- description: PCIe Slave Q2A AXI clock
clock-names:
items:
- const: aux
- const: cfg
- const: bus_master
- const: bus_slave
- const: slave_q2a
interrupts:
minItems: 3
maxItems: 3
interrupt-names:
minItems: 3
maxItems: 3
unevaluatedProperties: false

diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c

@ -870,30 +870,40 @@ static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
static void dw_pcie_edma_init_data(struct dw_pcie *pci)
{
pci->edma.dev = pci->dev;
if (!pci->edma.ops)
pci->edma.ops = &dw_pcie_edma_ops;
pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
}
static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
{
u32 val;
/*
* Bail out finding the mapping format if it is already set by the glue
* driver. Also ensure that the edma.reg_base is pointing to a valid
* memory region.
*/
if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
return pci->edma.reg_base ? 0 : -ENODEV;
/*
* Indirect eDMA CSRs access has been completely removed since v5.40a
* thus no space is now reserved for the eDMA channels viewport and
* former DMA CTRL register is no longer fixed to FFs.
*
* Note that Renesas R-Car S4-8's PCIe controllers for unknown reason
* have zeros in the eDMA CTRL register even though the HW-manual
explicitly states there must be FFs if the unrolled mapping is enabled.
* For such cases the low-level drivers are supposed to manually
* activate the unrolled mapping to bypass the auto-detection procedure.
*/
if (dw_pcie_ver_is_ge(pci, 540A) || dw_pcie_cap_is(pci, EDMA_UNROLL))
if (dw_pcie_ver_is_ge(pci, 540A))
val = 0xFFFFFFFF;
else
val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
if (val == 0xFFFFFFFF && pci->edma.reg_base) {
pci->edma.mf = EDMA_MF_EDMA_UNROLL;
val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
} else if (val != 0xFFFFFFFF) {
pci->edma.mf = EDMA_MF_EDMA_LEGACY;
@ -902,15 +912,25 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return -ENODEV;
}
pci->edma.dev = pci->dev;
return 0;
}
if (!pci->edma.ops)
pci->edma.ops = &dw_pcie_edma_ops;
static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
{
u32 val;
pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
/*
* Autodetect the read/write channels count only for non-HDMA platforms.
* HDMA platforms with native CSR mapping doesn't support autodetect,
* so the glue drivers should've passed the valid count already. If not,
* the below sanity check will catch it.
*/
if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
}
/* Sanity check the channels count if the mapping was incorrect */
if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
@ -920,6 +940,19 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return 0;
}
static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
{
int ret;
dw_pcie_edma_init_data(pci);
ret = dw_pcie_edma_find_mf(pci);
if (ret)
return ret;
return dw_pcie_edma_find_channels(pci);
}
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);

diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h

@ -51,9 +51,8 @@
/* DWC PCIe controller capabilities */
#define DW_PCIE_CAP_REQ_RES 0
#define DW_PCIE_CAP_EDMA_UNROLL 1
#define DW_PCIE_CAP_IATU_UNROLL 2
#define DW_PCIE_CAP_CDM_CHECK 3
#define DW_PCIE_CAP_IATU_UNROLL 1
#define DW_PCIE_CAP_CDM_CHECK 2
#define dw_pcie_cap_is(_pci, _cap) \
test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)

diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c

@ -47,6 +47,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
#define PARF_NO_SNOOP_OVERIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@ -86,6 +87,10 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
/* PARF_NO_SNOOP_OVERIDE register fields */
#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@ -149,6 +154,16 @@ enum qcom_pcie_ep_link_status {
QCOM_PCIE_EP_LINK_DOWN,
};
/**
* struct qcom_pcie_ep_cfg - Per SoC config struct
* @hdma_support: HDMA support on this SoC
* @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
*/
struct qcom_pcie_ep_cfg {
bool hdma_support;
bool override_no_snoop;
};
/**
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
@ -167,6 +182,7 @@ enum qcom_pcie_ep_link_status {
* @num_clks: PCIe clocks count
* @perst_en: Flag for PERST enable
* @perst_sep_en: Flag for PERST separation enable
* @cfg: PCIe EP config struct
* @link_status: PCIe Link status
* @global_irq: Qualcomm PCIe specific Global IRQ
* @perst_irq: PERST# IRQ
@ -194,6 +210,7 @@ struct qcom_pcie_ep {
u32 perst_en;
u32 perst_sep_en;
const struct qcom_pcie_ep_cfg *cfg;
enum qcom_pcie_ep_link_status link_status;
int global_irq;
int perst_irq;
@ -489,6 +506,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val |= BIT(8);
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
writel_relaxed(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
pcie_ep->parf + PARF_NO_SNOOP_OVERIDE);
return 0;
err_disable_resources:
@ -500,12 +521,6 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
struct device *dev = pci->dev;
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
dev_dbg(dev, "Link is already disabled\n");
return;
}
pci_epc_deinit_notify(pci->ep.epc);
dw_pcie_ep_cleanup(&pci->ep);
@ -817,6 +832,14 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
pcie_ep->pci.edma.nr_irqs = 1;
pcie_ep->cfg = of_device_get_match_data(dev);
if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
pcie_ep->pci.edma.ll_wr_cnt = 8;
pcie_ep->pci.edma.ll_rd_cnt = 8;
pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
}
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
@ -875,7 +898,13 @@ static void qcom_pcie_ep_remove(struct platform_device *pdev)
qcom_pcie_disable_resources(pcie_ep);
}
static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
.hdma_support = true,
.override_no_snoop = true,
};
static const struct of_device_id qcom_pcie_ep_match[] = {
{ .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
{ }

diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c

@ -18,9 +18,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@ -29,6 +31,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>
#include "../../pci.h"
#include "pcie-designware.h"
@ -50,6 +53,7 @@
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_NO_SNOOP_OVERIDE 0x3d4
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
@ -117,6 +121,10 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
/* PARF_NO_SNOOP_OVERIDE register fields */
#define WR_NO_SNOOP_OVERIDE_EN BIT(1)
#define RD_NO_SNOOP_OVERIDE_EN BIT(3)
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
@ -153,58 +161,56 @@
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
struct qcom_pcie_resources_1_0_0 {
struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
#define QCOM_PCIE_2_1_0_MAX_RESETS 6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
int num_resets;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
#define QCOM_PCIE_2_3_3_MAX_RESETS 7
struct qcom_pcie_resources_2_3_3 {
struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};
#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
#define QCOM_PCIE_2_4_0_MAX_RESETS 12
struct qcom_pcie_resources_2_4_0 {
struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
int num_resets;
};
#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
struct qcom_pcie_resources_2_7_0 {
struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
struct reset_control *rst;
};
#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
struct qcom_pcie_resources_2_9_0 {
struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
struct clk_bulk_data *clks;
int num_clks;
struct reset_control *rst;
};
@ -230,8 +236,15 @@ struct qcom_pcie_ops {
int (*config_sid)(struct qcom_pcie *pcie);
};
/**
* struct qcom_pcie_cfg - Per SoC config struct
* @ops: qcom PCIe ops structure
* @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
* snooping
*/
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
bool override_no_snoop;
bool no_l0s;
};
@ -244,6 +257,7 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
struct icc_path *icc_mem;
struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
bool suspended;
@ -336,21 +350,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
res->clks[0].id = "iface";
res->clks[1].id = "core";
res->clks[2].id = "phy";
res->clks[3].id = "aux";
res->clks[4].id = "ref";
/* iface, core, phy are required */
ret = devm_clk_bulk_get(dev, 3, res->clks);
if (ret < 0)
return ret;
/* aux, ref are optional */
ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
res->resets[0].id = "pci";
res->resets[1].id = "axi";
@ -372,7 +376,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
reset_control_bulk_assert(res->num_resets, res->resets);
writel(1, pcie->parf + PARF_PHY_CTRL);
@ -424,7 +428,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret)
return ret;
@ -475,20 +479,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
int ret;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
res->clks[0].id = "iface";
res->clks[1].id = "aux";
res->clks[2].id = "master_bus";
res->clks[3].id = "slave_bus";
ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@ -499,7 +499,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@ -516,7 +516,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_reset;
@ -531,7 +531,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return 0;
err_disable_clks:
clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
reset_control_assert(res->core);
@ -579,14 +579,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
res->clks[0].id = "aux";
res->clks[1].id = "cfg";
res->clks[2].id = "bus_master";
res->clks[3].id = "bus_slave";
ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
return 0;
}
@ -595,7 +592,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@ -612,7 +609,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@ -660,17 +657,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
res->clks[0].id = "aux";
res->clks[1].id = "master_bus";
res->clks[2].id = "slave_bus";
res->clks[3].id = "iface";
/* qcom,pcie-ipq4019 is defined without "iface" */
res->num_clks = is_ipq ? 3 : 4;
ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
res->resets[0].id = "axi_m";
res->resets[1].id = "axi_s";
@ -741,15 +732,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
struct device *dev = pci->dev;
int ret;
res->clks[0].id = "iface";
res->clks[1].id = "axi_m";
res->clks[2].id = "axi_s";
res->clks[3].id = "ahb";
res->clks[4].id = "aux";
ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
res->rst[0].id = "axi_m";
res->rst[1].id = "axi_s";
@ -770,7 +757,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@ -800,7 +787,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
*/
usleep_range(2000, 2500);
ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_resets;
@ -861,8 +848,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
unsigned int num_clks, num_opt_clks;
unsigned int idx;
int ret;
res->rst = devm_reset_control_array_get_exclusive(dev);
@ -876,36 +861,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
idx = 0;
res->clks[idx++].id = "aux";
res->clks[idx++].id = "cfg";
res->clks[idx++].id = "bus_master";
res->clks[idx++].id = "bus_slave";
res->clks[idx++].id = "slave_q2a";
num_clks = idx;
ret = devm_clk_bulk_get(dev, num_clks, res->clks);
if (ret < 0)
return ret;
res->clks[idx++].id = "tbu";
res->clks[idx++].id = "ddrss_sf_tbu";
res->clks[idx++].id = "aggre0";
res->clks[idx++].id = "aggre1";
res->clks[idx++].id = "noc_aggr";
res->clks[idx++].id = "noc_aggr_4";
res->clks[idx++].id = "noc_aggr_south_sf";
res->clks[idx++].id = "cnoc_qx";
res->clks[idx++].id = "sleep";
res->clks[idx++].id = "cnoc_sf_axi";
num_opt_clks = idx - num_clks;
res->num_clks = idx;
ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
return 0;
}
@ -985,6 +945,12 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
if (pcie_cfg->override_no_snoop)
writel(WR_NO_SNOOP_OVERIDE_EN | RD_NO_SNOOP_OVERIDE_EN,
pcie->parf + PARF_NO_SNOOP_OVERIDE);
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
@ -1100,17 +1066,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
int ret;
res->clks[0].id = "iface";
res->clks[1].id = "axi_m";
res->clks[2].id = "axi_s";
res->clks[3].id = "axi_bridge";
res->clks[4].id = "rchng";
ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
if (ret < 0)
return ret;
res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
if (res->num_clks < 0) {
dev_err(dev, "Failed to get clocks\n");
return res->num_clks;
}
res->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(res->rst))
@ -1123,7 +1084,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@ -1152,7 +1113,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
usleep_range(2000, 2500);
return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
@ -1365,6 +1326,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
.ops = &ops_1_9_0,
};
static const struct qcom_pcie_cfg cfg_1_34_0 = {
.ops = &ops_1_9_0,
.override_no_snoop = true,
};
static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
@ -1408,6 +1374,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
if (IS_ERR(pcie->icc_mem))
return PTR_ERR(pcie->icc_mem);
pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
if (IS_ERR(pcie->icc_cpu))
return PTR_ERR(pcie->icc_cpu);
/*
* Some Qualcomm platforms require interconnect bandwidth constraints
* to be set before enabling interconnect clocks.
@ -1417,23 +1386,35 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
*/
ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
if (ret) {
dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
ret);
return ret;
}
/*
* Since the CPU-PCIe path is only used for activities like register
* access of the host controller and endpoint Config/BAR space access,
* HW team has recommended to use a minimal bandwidth of 1KBps just to
* keep the path active.
*/
ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
if (ret) {
dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
ret);
icc_set_bw(pcie->icc_mem, 0, 0);
return ret;
}
return 0;
}
static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
u32 offset, status, width, speed;
struct dw_pcie *pci = pcie->pci;
u32 offset, status;
int speed, width;
int ret;
if (!pcie->icc_mem)
return;
unsigned long freq_kbps;
struct dev_pm_opp *opp;
int ret, freq_mbps;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@ -1445,11 +1426,29 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
if (pcie->icc_mem) {
ret = icc_set_bw(pcie->icc_mem, 0,
width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
if (ret) {
dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
ret);
}
} else {
freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
if (freq_mbps < 0)
return;
freq_kbps = freq_mbps * KILO;
opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
true);
if (!IS_ERR(opp)) {
ret = dev_pm_opp_set_opp(pci->dev, opp);
if (ret)
dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
freq_kbps * width, ret);
dev_pm_opp_put(opp);
}
}
}
static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
@ -1492,7 +1491,9 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
unsigned long max_freq = ULONG_MAX;
struct device *dev = &pdev->dev;
struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
struct dw_pcie_rp *pp;
struct resource *res;
@ -1560,9 +1561,43 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
}
/*
* Before the PCIe link is initialized, vote for highest OPP in the OPP
* table, so that we are voting for maximum voltage corner for the
* link to come up in maximum supported speed. At the end of the
* probe(), OPP will be updated using qcom_pcie_icc_opp_update().
*/
if (!ret) {
opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
dev_err_probe(pci->dev, ret,
"Unable to find max freq OPP\n");
goto err_pm_runtime_put;
} else {
ret = dev_pm_opp_set_opp(dev, opp);
}
dev_pm_opp_put(opp);
if (ret) {
dev_err_probe(pci->dev, ret,
"Failed to set OPP for freq %lu\n",
max_freq);
goto err_pm_runtime_put;
}
} else {
/* Skip ICC init if OPP is supported as it is handled by OPP */
ret = qcom_pcie_icc_init(pcie);
if (ret)
goto err_pm_runtime_put;
}
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@ -1582,7 +1617,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_phy_exit;
}
qcom_pcie_icc_update(pcie);
qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
qcom_pcie_init_debugfs(pcie);
@ -1601,17 +1636,21 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
struct qcom_pcie *pcie = dev_get_drvdata(dev);
int ret;
int ret = 0;
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
*/
if (pcie->icc_mem) {
ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
if (ret) {
dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
dev_err(dev,
"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
ret);
return ret;
}
}
/*
* Turn OFF the resources only for controllers without active PCIe
@ -1633,7 +1672,21 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
pcie->suspended = true;
}
return 0;
/*
* Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM.
* Because on some platforms, DBI access can happen very late during the
* S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC
* error.
*/
if (pm_suspend_target_state != PM_SUSPEND_MEM) {
ret = icc_disable(pcie->icc_cpu);
if (ret)
dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
if (!pcie->icc_mem)
dev_pm_opp_set_opp(pcie->pci->dev, NULL);
}
return ret;
}
static int qcom_pcie_resume_noirq(struct device *dev)
@ -1641,6 +1694,14 @@ static int qcom_pcie_resume_noirq(struct device *dev)
struct qcom_pcie *pcie = dev_get_drvdata(dev);
int ret;
if (pm_suspend_target_state != PM_SUSPEND_MEM) {
ret = icc_enable(pcie->icc_cpu);
if (ret) {
dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
return ret;
}
}
if (pcie->suspended) {
ret = qcom_pcie_host_init(&pcie->pci->pp);
if (ret)
@ -1649,7 +1710,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)
pcie->suspended = false;
}
qcom_pcie_icc_update(pcie);
qcom_pcie_icc_opp_update(pcie);
return 0;
}
@ -1666,7 +1727,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },

diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c

@ -255,7 +255,7 @@ static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
rcar->dw.ops = &dw_pcie_ops;
rcar->dw.dev = dev;
rcar->pdev = pdev;
dw_pcie_cap_set(&rcar->dw, EDMA_UNROLL);
rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
dw_pcie_cap_set(&rcar->dw, REQ_RES);
platform_set_drvdata(pdev, rcar);

diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c

@ -137,6 +137,7 @@ static const struct pci_epf_mhi_ep_info sa8775p_info = {
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
.flags = MHI_EPF_USE_DMA,
};
struct pci_epf_mhi {

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c

@ -6100,24 +6100,7 @@ int pcie_link_speed_mbps(struct pci_dev *pdev)
if (err)
return err;
switch (to_pcie_link_speed(lnksta)) {
case PCIE_SPEED_2_5GT:
return 2500;
case PCIE_SPEED_5_0GT:
return 5000;
case PCIE_SPEED_8_0GT:
return 8000;
case PCIE_SPEED_16_0GT:
return 16000;
case PCIE_SPEED_32_0GT:
return 32000;
case PCIE_SPEED_64_0GT:
return 64000;
default:
break;
}
return -EINVAL;
return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
}
EXPORT_SYMBOL(pcie_link_speed_mbps);

diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h

@ -326,6 +326,28 @@ void pci_bus_put(struct pci_bus *bus);
(speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
0)
static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed)
{
switch (speed) {
case PCIE_SPEED_2_5GT:
return 2500;
case PCIE_SPEED_5_0GT:
return 5000;
case PCIE_SPEED_8_0GT:
return 8000;
case PCIE_SPEED_16_0GT:
return 16000;
case PCIE_SPEED_32_0GT:
return 32000;
case PCIE_SPEED_64_0GT:
return 64000;
default:
break;
}
return -EINVAL;
}
const char *pci_speed_string(enum pci_bus_speed speed);
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);