// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;
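
/*
 * Top-level irq_chip callbacks for the hierarchical MSI domain: ack, mask
 * and unmask are forwarded to the parent (controller-level) chip, with
 * mask/unmask additionally applied at the PCI device's MSI capability.
 */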
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
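
/*
 * Bottom-level (controller) irq_chip callbacks. Each MSI control covers
 * MAX_MSI_IRQS_PER_CTRL vectors, so the hwirq is split into a control
 * block index and a bit within that block's mask/status register.
 */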
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
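
/*
 * Allocate a contiguous, power-of-two aligned block of vectors from the
 * msi_irq_in_use bitmap; multi-MSI requires contiguous, aligned hwirqs,
 * which bitmap_find_free_region() provides in a single region.
 */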
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
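
/*
 * Create the two-level MSI domain hierarchy: a linear parent domain handing
 * out controller hwirqs, tagged DOMAIN_BUS_NEXUS so allocations are routed
 * through it, and a PCI MSI child domain stacked on top.
 */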
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_page)
		__free_page(pp->msi_page);
}
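
/*
 * Allocate and DMA-map a page to reserve a bus address that serves as the
 * MSI target, then program it into the controller. The controller detects
 * inbound writes to this address and latches them into its per-control
 * MSI status registers; the page contents themselves are never read.
 */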
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	u64 msi_target;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	pp->msi_page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(pp->msi_page);
		pp->msi_page = NULL;
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
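
/*
 * dw_pcie_host_init() is the common entry point for DWC-based host drivers.
 * A typical caller embeds struct pcie_port in its dw_pcie and calls this
 * from probe; a minimal sketch (illustrative names, not a real driver):
 *
 *	pci->dev = dev;
 *	pci->pp.ops = &my_pcie_host_ops;
 *	return dw_pcie_host_init(&pci->pp);
 */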
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
								pp->cfg0_base,
								pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);
			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
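
/*
 * Config accesses to devices below the root bus go through an outbound ATU
 * window that is retargeted on every access: Type 0 config for devices on
 * the root port's secondary bus, Type 1 for buses further downstream.
 */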
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, pp->cfg0_base,
				  busdev, pp->cfg0_size);

	return pp->va_cfg0_base + where;
}
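
/*
 * With two or fewer viewports, the config window above shares an ATU index
 * with the I/O window, so the I/O translation must be restored after each
 * config read or write.
 */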
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};
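
/*
 * Root bus config space maps straight onto the DBI registers; only the
 * root port itself (slot 0) is addressable. Exported so platform drivers
 * can reuse it in their own pci_ops.
 */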
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
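
/*
 * Program the Root Complex side of the controller: RC BARs, interrupt pin,
 * bus numbers, command register, MSI controls, and (unless the platform
 * supplies its own child config accessors) the default outbound ATU windows.
 */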
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (!pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		struct resource_entry *entry =
			resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, entry->res->start,
					  entry->res->start - entry->offset,
					  resource_size(entry->res));
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);