// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
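
/*
 * "Own" config accesses target the root port's own config space, which
 * the core exposes through the DBI registers.  Accesses to devices below
 * the root port take the "other" path further down, through an
 * ATU-mapped config window.
 */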
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
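
/*
 * MSI handling uses a hierarchical IRQ domain.  The "top" chip below is
 * the one the PCI/MSI core sees; its callbacks propagate mask/unmask/ack
 * to the parent (bottom) chip, which does the actual writes to the
 * controller's per-vector MSI registers.
 */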
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *)&val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
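
/*
 * dw_handle_msi_irq() is exported because some glue drivers receive the
 * MSI controller interrupt in their own ISR instead of using the chained
 * handler below.  As a sketch (foo_pcie_msi_irq_handler and the way the
 * port pointer is passed are illustrative, not part of this driver):
 *
 *	static irqreturn_t foo_pcie_msi_irq_handler(int irq, void *arg)
 *	{
 *		struct pcie_port *pp = arg;
 *
 *		return dw_handle_msi_irq(pp);
 *	}
 */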
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
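
/*
 * Compose the MSI message for one vector: the address is the doorbell
 * programmed into the controller by dw_pcie_msi_init() and the data is
 * the hwirq number, which the hardware records as a bit in the matching
 * PCIE_MSI_INTR0_STATUS bank when the message is received.
 */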
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
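
/*
 * Allocate hwirqs in power-of-two blocks: order_base_2(nr_irqs) makes
 * bitmap_find_free_region() hand back a contiguous, naturally aligned
 * range, as Multiple Message MSI requires.
 */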
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
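
/*
 * Two stacked domains are created: pp->irq_domain is the bottom
 * (hardware) domain, keyed by hwirq and backed by the ops above, while
 * pp->msi_domain is the PCI/MSI domain layered on top of it, which the
 * PCI core allocates vectors from.
 */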
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_page)
		__free_page(pp->msi_page);
}
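
/*
 * Set up the MSI "doorbell".  A page is allocated and DMA-mapped purely
 * so that its bus address can be advertised as the MSI target address:
 * the controller decodes inbound writes to this address and raises an
 * MSI interrupt instead of forwarding them, so the page itself is never
 * actually written to.
 */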
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	u64 msi_target;

	pp->msi_page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(pp->msi_page);
		pp->msi_page = NULL;
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	u32 hdr_type;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);
	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    &bridge->windows,
						    &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		return ret;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}
	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			return -ENOMEM;
		}
	}
	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}
	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
	if (ret != PCIBIOS_SUCCESSFUL) {
		dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
			ret);
		ret = pcibios_err_to_errno(ret);
		goto err_free_msi;
	}
	if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
		dev_err(pci->dev,
			"PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
			hdr_type);
		ret = -EIO;
		goto err_free_msi;
	}
	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto err_free_msi;

	pp->root_bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(pp->root_bus);
	pci_bus_assign_resources(pp->root_bus);

	list_for_each_entry(child, &pp->root_bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(pp->root_bus);
	return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->root_bus);
	pci_remove_root_bus(pp->root_bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
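
/*
 * Config accesses to devices below the root port go through an ATU
 * config viewport: CFG0 (Type 0) for devices on the bus immediately
 * below the root port, CFG1 (Type 1) for buses further downstream.
 */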
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);

	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
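
/*
 * Note that when only two ATU viewports are available, index 1 above is
 * shared between config and I/O accesses, which is why it is programmed
 * back to an I/O window once the config access has completed.
 */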
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}
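
/*
 * The root bus hosts only the root port itself: own-config accesses go
 * straight to the DBI registers, which do not decode a device number,
 * so any slot other than 0 would simply alias the root port.  Hence
 * dw_pcie_valid_device() rejects dev > 0 on the root bus.
 */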
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
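
/*
 * Bring up the Root Complex side of the controller.  The bus number
 * register is seeded with primary 0, secondary 1, subordinate 0xff
 * (the 0x00ff0100 value below); the PCI core rewrites these once the
 * bus has actually been scanned.
 */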
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (!pp->ops->msi_host_init) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, pp->irq_mask[ctrl]);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    4, ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);