PCI: dwc: Introduce dma-ranges property support for RC-host
In accordance with the generic PCIe Root Port DT bindings, the "dma-ranges" property has the same format as the "ranges" property. The only difference is in their semantics: "dma-ranges" describes the PCIe-to-CPU memory mapping, the opposite of the CPU-to-PCIe mapping described by "ranges". Even though DW PCIe controllers are normally equipped with an internal Address Translation Unit whose inbound and outbound tables can be used to implement both semantics, surprisingly the host-related part of the DW PCIe driver currently supports only the "ranges" property, while "dma-ranges" windows are simply ignored. Supporting "dma-ranges" in the driver would be very handy for platforms that don't tolerate a 1:1 CPU-PCIe memory mapping and require a customized PCIe memory layout. So let's fix that by introducing "dma-ranges" property support.

First, rename dw_pcie_prog_inbound_atu() to dw_pcie_prog_ep_inbound_atu() and create a new version of dw_pcie_prog_inbound_atu(). That way we have two methods, for the RC and EP controllers respectively, just like the outbound ATU setup methods.

Second, in addition to the memory window index and type, the new dw_pcie_prog_inbound_atu() accepts a CPU address, a PCIe address and a size as its arguments. These parameters define the PCIe and CPU memory ranges used to set up the respective inbound ATU mapping, and they are verified against the ATU range constraints in the same way as is done for the outbound ranges.

Finally, the DMA ranges detected for the PCIe controller are converted to inbound ATU entries during the host controller initialization procedure, as part of dw_pcie_iatu_setup(). Note that before setting up the inbound ranges, all inbound ATU entries are disabled in order to prevent unexpected PCIe TLP translations left behind by third-party software such as bootloaders.

Link: https://lore.kernel.org/r/20221113191301.5526-16-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
Reviewed-by: Rob Herring <robh@kernel.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
parent ce27c4e61f
commit 8522e17d4c
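As a quick illustration of the new inbound-iATU API semantics, the following sketch (not part of the patch; the window index, addresses and size are made-up values) shows how a single mapping would be programmed with the reworked dw_pcie_prog_inbound_atu():

	/*
	 * Illustrative only: route inbound (device-initiated) accesses to PCIe
	 * bus addresses 0x0..0x3fffffff onto CPU address 0x40000000 via inbound
	 * iATU window 0. All values are hypothetical.
	 */
	ret = dw_pcie_prog_inbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
				       0x40000000ULL,	/* cpu_addr: CPU-side target */
				       0x00000000ULL,	/* pci_addr: PCIe bus base */
				       SZ_1G);		/* window size */
	if (ret)
		dev_err(pci->dev, "Failed to program inbound window\n");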
@@ -171,8 +171,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
 		return -EINVAL;
 	}
 
-	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
-				       cpu_addr, bar);
+	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+					  cpu_addr, bar);
 	if (ret < 0) {
 		dev_err(pci->dev, "Failed to program IB window\n");
 		return ret;
@@ -643,12 +643,15 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 	}
 
 	/*
-	 * Ensure all outbound windows are disabled before proceeding with
-	 * the MEM/IO ranges setups.
+	 * Ensure all out/inbound windows are disabled before proceeding with
+	 * the MEM/IO (dma-)ranges setups.
 	 */
 	for (i = 0; i < pci->num_ob_windows; i++)
 		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
 
+	for (i = 0; i < pci->num_ib_windows; i++)
+		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
+
 	i = 0;
 	resource_list_for_each_entry(entry, &pp->bridge->windows) {
 		if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -685,9 +688,32 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
 	}
 
 	if (pci->num_ob_windows <= i)
-		dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
 			 pci->num_ob_windows);
 
+	i = 0;
+	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
+		if (resource_type(entry->res) != IORESOURCE_MEM)
+			continue;
+
+		if (pci->num_ib_windows <= i)
+			break;
+
+		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
+					       entry->res->start,
+					       entry->res->start - entry->offset,
+					       resource_size(entry->res));
+		if (ret) {
+			dev_err(pci->dev, "Failed to set DMA range %pr\n",
+				entry->res);
+			return ret;
+		}
+	}
+
+	if (pci->num_ib_windows <= i)
+		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
+			 pci->num_ib_windows);
+
 	return 0;
 }
 
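For reference, here is a hedged sketch (the device tree entry and all values are purely illustrative, not taken from the patch or from any real board) of how one dma-ranges entry ends up in the resource list that the loop above walks:

	/*
	 * A DT entry such as
	 *   dma-ranges = <0x02000000 0x0 0x00000000  0x0 0x40000000  0x0 0x40000000>;
	 * (PCIe bus address 0x0 mapped to CPU address 0x40000000, 1 GiB) is
	 * parsed by the OF/PCI core into a resource entry where, roughly:
	 *   entry->res->start == 0x40000000   CPU address of the window
	 *   entry->offset     == 0x40000000   CPU address minus PCIe address
	 * so entry->res->start - entry->offset == 0x0 recovers the PCIe bus
	 * base that dw_pcie_prog_inbound_atu() is called with above.
	 */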
@@ -393,8 +393,60 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
 	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
 }
 
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-			     int type, u64 cpu_addr, u8 bar)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+			     u64 cpu_addr, u64 pci_addr, u64 size)
+{
+	u64 limit_addr = pci_addr + size - 1;
+	u32 retries, val;
+
+	if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
+	    !IS_ALIGNED(cpu_addr, pci->region_align) ||
+	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+		return -EINVAL;
+	}
+
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
+			      lower_32_bits(pci_addr));
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
+			      upper_32_bits(pci_addr));
+
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
+			      lower_32_bits(limit_addr));
+	if (dw_pcie_ver_is_ge(pci, 460A))
+		dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
+				      upper_32_bits(limit_addr));
+
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+			      lower_32_bits(cpu_addr));
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+			      upper_32_bits(cpu_addr));
+
+	val = type;
+	if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
+	    dw_pcie_ver_is_ge(pci, 460A))
+		val |= PCIE_ATU_INCREASE_REGION_SIZE;
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
+	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+	return -ETIMEDOUT;
+}
+
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+				int type, u64 cpu_addr, u8 bar)
 {
 	u32 retries, val;
 
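The sanity checks at the top of the new dw_pcie_prog_inbound_atu() mirror the outbound helper: the size must be non-zero, both addresses must be aligned to the detected region granularity, and the window must not cross the maximum region boundary. A hedged illustration follows; the region_align and region_limit values are assumptions made for the example (in practice they are detected at probe time):

	/*
	 * Assuming region_align == SZ_64K and region_limit == SZ_4G - 1:
	 *
	 *   dw_pcie_prog_inbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
	 *                            0x40001000ULL, 0x0ULL, SZ_1M);
	 *   -> -EINVAL: cpu_addr is not aligned to the 64 KiB region granularity
	 *
	 *   dw_pcie_prog_inbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
	 *                            0x40000000ULL, 0xc0000000ULL, SZ_2G);
	 *   -> -EINVAL: pci_addr + size - 1 crosses the 4 GiB region boundary
	 */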
@@ -346,8 +346,10 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
 			      u64 cpu_addr, u64 pci_addr, u64 size);
 int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				 int type, u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
-			     int type, u64 cpu_addr, u8 bar);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+			     u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+				int type, u64 cpu_addr, u8 bar);
 void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
 void dw_pcie_setup(struct dw_pcie *pci);
 void dw_pcie_iatu_detect(struct dw_pcie *pci);