2018-01-26 23:22:04 +03:00
// SPDX-License-Identifier: GPL-2.0+
2014-08-20 20:26:02 +04:00
/*
* PCIe host controller driver for Xilinx AXI PCIe Bridge
*
* Copyright ( c ) 2012 - 2014 Xilinx , Inc .
*
* Based on the Tegra PCIe driver
*
2017-09-02 00:35:50 +03:00
* Bits taken from Synopsys DesignWare Host controller driver and
2014-08-20 20:26:02 +04:00
* ARM PCI Host generic driver .
*/
# include <linux/interrupt.h>
# include <linux/irq.h>
# include <linux/irqdomain.h>
# include <linux/kernel.h>
2016-08-24 23:57:49 +03:00
# include <linux/init.h>
2014-08-20 20:26:02 +04:00
# include <linux/msi.h>
# include <linux/of_address.h>
# include <linux/of_pci.h>
# include <linux/of_platform.h>
# include <linux/of_irq.h>
# include <linux/pci.h>
2020-11-30 02:07:39 +03:00
# include <linux/pci-ecam.h>
2014-08-20 20:26:02 +04:00
# include <linux/platform_device.h>
2018-05-11 20:15:30 +03:00
# include "../pci.h"
2014-08-20 20:26:02 +04:00
/* Register definitions */
# define XILINX_PCIE_REG_BIR 0x00000130
# define XILINX_PCIE_REG_IDR 0x00000138
# define XILINX_PCIE_REG_IMR 0x0000013c
# define XILINX_PCIE_REG_PSCR 0x00000144
# define XILINX_PCIE_REG_RPSC 0x00000148
# define XILINX_PCIE_REG_MSIBASE1 0x0000014c
# define XILINX_PCIE_REG_MSIBASE2 0x00000150
# define XILINX_PCIE_REG_RPEFR 0x00000154
# define XILINX_PCIE_REG_RPIFR1 0x00000158
# define XILINX_PCIE_REG_RPIFR2 0x0000015c
/* Interrupt registers definitions */
# define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
# define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
# define XILINX_PCIE_INTR_STR_ERR BIT(2)
# define XILINX_PCIE_INTR_HOT_RESET BIT(3)
# define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
# define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
# define XILINX_PCIE_INTR_NONFATAL BIT(10)
# define XILINX_PCIE_INTR_FATAL BIT(11)
# define XILINX_PCIE_INTR_INTX BIT(16)
# define XILINX_PCIE_INTR_MSI BIT(17)
# define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
# define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
# define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
# define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
# define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
# define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
# define XILINX_PCIE_INTR_MST_DECERR BIT(26)
# define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
# define XILINX_PCIE_INTR_MST_ERRP BIT(28)
# define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
2017-08-16 00:25:25 +03:00
# define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D
2014-08-20 20:26:02 +04:00
# define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
/* Root Port Error FIFO Read Register definitions */
# define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
# define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
# define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
/* Root Port Interrupt FIFO Read Register 1 definitions */
# define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
# define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
# define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
# define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
# define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
/* Bridge Info Register definitions */
# define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
# define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
/* Root Port Interrupt FIFO Read Register 2 definitions */
# define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
/* Root Port Status/control Register definitions */
# define XILINX_PCIE_REG_RPSC_BEN BIT(0)
/* Phy Status/Control Register definitions */
# define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
/* Number of MSI IRQs */
# define XILINX_NUM_MSI_IRQS 128
/**
 * struct xilinx_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: IO Mapped Register Base
 * @msi_map: Bitmap of allocated MSIs
 * @map_lock: Mutex protecting the MSI allocation
 * @msi_domain: MSI IRQ domain pointer
 * @leg_domain: Legacy IRQ domain pointer
 * @resources: Bus Resources
 */
struct xilinx_pcie {
	struct device *dev;
	/* Bridge registers and ECAM config space share this mapping */
	void __iomem *reg_base;
	/* One bit per MSI hwirq; guarded by @map_lock */
	unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)];
	struct mutex map_lock;
	struct irq_domain *msi_domain;
	struct irq_domain *leg_domain;
	struct list_head resources;
};
2021-12-23 04:10:53 +03:00
/* Read a 32-bit bridge register at offset @reg. */
static inline u32 pcie_read(struct xilinx_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}
2021-12-23 04:10:53 +03:00
/* Write @val to the 32-bit bridge register at offset @reg. */
static inline void pcie_write(struct xilinx_pcie *pcie, u32 val, u32 reg)
{
	writel(val, pcie->reg_base + reg);
}
2021-12-23 04:10:53 +03:00
static inline bool xilinx_pcie_link_up ( struct xilinx_pcie * pcie )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
return ( pcie_read ( pcie , XILINX_PCIE_REG_PSCR ) &
2014-08-20 20:26:02 +04:00
XILINX_PCIE_REG_PSCR_LNKUP ) ? 1 : 0 ;
}
/**
* xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
2021-12-23 04:10:53 +03:00
* @ pcie : PCIe port information
2014-08-20 20:26:02 +04:00
*/
2021-12-23 04:10:53 +03:00
static void xilinx_pcie_clear_err_interrupts ( struct xilinx_pcie * pcie )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
struct device * dev = pcie - > dev ;
unsigned long val = pcie_read ( pcie , XILINX_PCIE_REG_RPEFR ) ;
2014-08-20 20:26:02 +04:00
if ( val & XILINX_PCIE_RPEFR_ERR_VALID ) {
2016-10-06 21:44:42 +03:00
dev_dbg ( dev , " Requester ID %lu \n " ,
2014-08-20 20:26:02 +04:00
val & XILINX_PCIE_RPEFR_REQ_ID ) ;
2021-12-23 04:10:53 +03:00
pcie_write ( pcie , XILINX_PCIE_RPEFR_ALL_MASK ,
2014-08-20 20:26:02 +04:00
XILINX_PCIE_REG_RPEFR ) ;
}
}
/**
* xilinx_pcie_valid_device - Check if a valid device is present on bus
* @ bus : PCI Bus structure
* @ devfn : device / function
*
* Return : ' true ' on success and ' false ' if invalid device is found
*/
static bool xilinx_pcie_valid_device ( struct pci_bus * bus , unsigned int devfn )
{
2021-12-23 04:10:53 +03:00
struct xilinx_pcie * pcie = bus - > sysdata ;
2014-08-20 20:26:02 +04:00
2021-12-23 04:10:53 +03:00
/* Check if link is up when trying to access downstream pcie ports */
2020-07-22 05:25:03 +03:00
if ( ! pci_is_root_bus ( bus ) ) {
2021-12-23 04:10:53 +03:00
if ( ! xilinx_pcie_link_up ( pcie ) )
2014-08-20 20:26:02 +04:00
return false ;
2020-07-22 05:25:03 +03:00
} else if ( devfn > 0 ) {
/* Only one device down on each root port */
2014-08-20 20:26:02 +04:00
return false ;
2020-07-22 05:25:03 +03:00
}
2014-08-20 20:26:02 +04:00
return true ;
}
/**
2015-01-10 05:34:50 +03:00
* xilinx_pcie_map_bus - Get configuration base
2014-08-20 20:26:02 +04:00
* @ bus : PCI Bus structure
* @ devfn : Device / function
* @ where : Offset from base
*
* Return : Base address of the configuration space needed to be
* accessed .
*/
2015-01-10 05:34:50 +03:00
static void __iomem * xilinx_pcie_map_bus ( struct pci_bus * bus ,
unsigned int devfn , int where )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
struct xilinx_pcie * pcie = bus - > sysdata ;
2014-08-20 20:26:02 +04:00
2015-01-10 05:34:50 +03:00
if ( ! xilinx_pcie_valid_device ( bus , devfn ) )
return NULL ;
2021-12-23 04:10:53 +03:00
return pcie - > reg_base + PCIE_ECAM_OFFSET ( bus - > number , devfn , where ) ;
2014-08-20 20:26:02 +04:00
}
/* PCIe operations: generic accessors on top of the ECAM mapping */
static struct pci_ops xilinx_pcie_ops = {
	.map_bus = xilinx_pcie_map_bus,
	.read	= pci_generic_config_read,
	.write	= pci_generic_config_write,
};
/* MSI functions */

static void xilinx_msi_top_irq_ack(struct irq_data *d)
{
	/*
	 * xilinx_pcie_intr_handler() will have performed the Ack.
	 * Eventually, this should be fixed and the Ack be moved in
	 * the respective callbacks for INTx and MSI.
	 */
}
2021-03-30 18:11:36 +03:00
/* Top-level MSI chip: Ack is a no-op, handled centrally in the ISR */
static struct irq_chip xilinx_msi_top_chip = {
	.name		= "PCIe MSI",
	.irq_ack	= xilinx_msi_top_irq_ack,
};
2014-08-20 20:26:02 +04:00
2021-03-30 18:11:36 +03:00
/* Per-IRQ CPU affinity is not supported by this bridge. */
static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
2021-03-30 18:11:36 +03:00
static void xilinx_compose_msi_msg ( struct irq_data * data , struct msi_msg * msg )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
struct xilinx_pcie * pcie = irq_data_get_irq_chip_data ( data ) ;
2021-03-30 18:11:36 +03:00
phys_addr_t pa = ALIGN_DOWN ( virt_to_phys ( pcie ) , SZ_4K ) ;
msg - > address_lo = lower_32_bits ( pa ) ;
msg - > address_hi = upper_32_bits ( pa ) ;
msg - > data = data - > hwirq ;
2014-08-20 20:26:02 +04:00
}
2021-03-30 18:11:36 +03:00
/* Bottom (nexus) MSI chip: composes the message, rejects affinity changes */
static struct irq_chip xilinx_msi_bottom_chip = {
	.name			= "Xilinx MSI",
	.irq_set_affinity	= xilinx_msi_set_affinity,
	.irq_compose_msi_msg	= xilinx_compose_msi_msg,
};
static int xilinx_msi_domain_alloc ( struct irq_domain * domain , unsigned int virq ,
unsigned int nr_irqs , void * args )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
struct xilinx_pcie * pcie = domain - > host_data ;
2021-03-30 18:11:36 +03:00
int hwirq , i ;
2021-12-23 04:10:53 +03:00
mutex_lock ( & pcie - > map_lock ) ;
2021-03-30 18:11:36 +03:00
2021-12-23 04:10:53 +03:00
hwirq = bitmap_find_free_region ( pcie - > msi_map , XILINX_NUM_MSI_IRQS , order_base_2 ( nr_irqs ) ) ;
2021-03-30 18:11:36 +03:00
2021-12-23 04:10:53 +03:00
mutex_unlock ( & pcie - > map_lock ) ;
2014-08-20 20:26:02 +04:00
2014-09-09 16:11:50 +04:00
if ( hwirq < 0 )
2021-03-30 18:11:36 +03:00
return - ENOSPC ;
2014-08-20 20:26:02 +04:00
2021-03-30 18:11:36 +03:00
for ( i = 0 ; i < nr_irqs ; i + + )
irq_domain_set_info ( domain , virq + i , hwirq + i ,
& xilinx_msi_bottom_chip , domain - > host_data ,
handle_edge_irq , NULL , NULL ) ;
2014-08-20 20:26:02 +04:00
2021-03-30 18:11:36 +03:00
return 0 ;
}
2014-08-20 20:26:02 +04:00
2021-03-30 18:11:36 +03:00
static void xilinx_msi_domain_free ( struct irq_domain * domain , unsigned int virq ,
unsigned int nr_irqs )
{
struct irq_data * d = irq_domain_get_irq_data ( domain , virq ) ;
2021-12-23 04:10:53 +03:00
struct xilinx_pcie * pcie = domain - > host_data ;
2014-08-20 20:26:02 +04:00
2021-12-23 04:10:53 +03:00
mutex_lock ( & pcie - > map_lock ) ;
2014-08-20 20:26:02 +04:00
2021-12-23 04:10:53 +03:00
bitmap_release_region ( pcie - > msi_map , d - > hwirq , order_base_2 ( nr_irqs ) ) ;
2014-08-20 20:26:02 +04:00
2021-12-23 04:10:53 +03:00
mutex_unlock ( & pcie - > map_lock ) ;
2014-08-20 20:26:02 +04:00
}
2021-03-30 18:11:36 +03:00
/* Ops for the parent (nexus) MSI domain */
static const struct irq_domain_ops xilinx_msi_domain_ops = {
	.alloc	= xilinx_msi_domain_alloc,
	.free	= xilinx_msi_domain_free,
};
2021-03-30 18:11:36 +03:00
/* Info for the PCI MSI domain layered on top of the nexus domain */
static struct msi_domain_info xilinx_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.chip	= &xilinx_msi_top_chip,
};
2021-12-23 04:10:53 +03:00
static int xilinx_allocate_msi_domains ( struct xilinx_pcie * pcie )
2014-08-20 20:26:02 +04:00
{
2021-03-30 18:11:36 +03:00
struct fwnode_handle * fwnode = dev_fwnode ( pcie - > dev ) ;
struct irq_domain * parent ;
parent = irq_domain_create_linear ( fwnode , XILINX_NUM_MSI_IRQS ,
& xilinx_msi_domain_ops , pcie ) ;
if ( ! parent ) {
dev_err ( pcie - > dev , " failed to create IRQ domain \n " ) ;
return - ENOMEM ;
}
irq_domain_update_bus_token ( parent , DOMAIN_BUS_NEXUS ) ;
pcie - > msi_domain = pci_msi_create_irq_domain ( fwnode , & xilinx_msi_info , parent ) ;
if ( ! pcie - > msi_domain ) {
dev_err ( pcie - > dev , " failed to create MSI domain \n " ) ;
irq_domain_remove ( parent ) ;
return - ENOMEM ;
}
2014-08-20 20:26:02 +04:00
return 0 ;
}
2021-12-23 04:10:53 +03:00
static void xilinx_free_msi_domains ( struct xilinx_pcie * pcie )
2014-08-20 20:26:02 +04:00
{
2021-03-30 18:11:36 +03:00
struct irq_domain * parent = pcie - > msi_domain - > parent ;
2019-03-26 01:19:09 +03:00
2021-03-30 18:11:36 +03:00
irq_domain_remove ( pcie - > msi_domain ) ;
irq_domain_remove ( parent ) ;
2014-08-20 20:26:02 +04:00
}
/* INTx Functions */

/**
 * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	/* dummy chip: the hardware Ack is done centrally in the ISR */
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};
/* PCIe HW Functions */

/**
 * xilinx_pcie_intr_handler - Interrupt Service Handler
 * @irq: IRQ number
 * @data: PCIe port information
 *
 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
 */
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
	struct xilinx_pcie *pcie = (struct xilinx_pcie *)data;
	struct device *dev = pcie->dev;
	u32 val, mask, status;

	/* Read interrupt decode and mask registers */
	val = pcie_read(pcie, XILINX_PCIE_REG_IDR);
	mask = pcie_read(pcie, XILINX_PCIE_REG_IMR);

	/* Only act on unmasked sources; IRQ line is shared (IRQF_SHARED) */
	status = val & mask;
	if (!status)
		return IRQ_NONE;

	if (status & XILINX_PCIE_INTR_LINK_DOWN)
		dev_warn(dev, "Link Down\n");

	if (status & XILINX_PCIE_INTR_ECRC_ERR)
		dev_warn(dev, "ECRC failed\n");

	if (status & XILINX_PCIE_INTR_STR_ERR)
		dev_warn(dev, "Streaming error\n");

	if (status & XILINX_PCIE_INTR_HOT_RESET)
		dev_info(dev, "Hot reset\n");

	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
		dev_warn(dev, "ECAM access timeout\n");

	/* AER-style error messages also need the error FIFO drained */
	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
		dev_warn(dev, "Correctable error message\n");
		xilinx_pcie_clear_err_interrupts(pcie);
	}

	if (status & XILINX_PCIE_INTR_NONFATAL) {
		dev_warn(dev, "Non fatal error message\n");
		xilinx_pcie_clear_err_interrupts(pcie);
	}

	if (status & XILINX_PCIE_INTR_FATAL) {
		dev_warn(dev, "Fatal error message\n");
		xilinx_pcie_clear_err_interrupts(pcie);
	}

	if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
		struct irq_domain *domain;

		val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR1);

		/* Check whether interrupt valid */
		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(dev, "RP Intr FIFO1 read error\n");
			goto error;
		}

		/* Decode the IRQ number */
		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
			/* MSI: hwirq is the message data in FIFO register 2 */
			val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
				XILINX_PCIE_RPIFR2_MSG_DATA;
			domain = pcie->msi_domain->parent;
		} else {
			/* INTx: line number is encoded in FIFO register 1 */
			val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
				XILINX_PCIE_RPIFR1_INTR_SHIFT;
			domain = pcie->leg_domain;
		}

		/* Clear interrupt FIFO register 1 */
		pcie_write(pcie, XILINX_PCIE_RPIFR1_ALL_MASK,
			   XILINX_PCIE_REG_RPIFR1);

		/* Dispatch into the decoded (MSI or INTx) domain */
		generic_handle_domain_irq(domain, val);
	}

	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
		dev_warn(dev, "Slave unsupported request\n");

	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
		dev_warn(dev, "Slave unexpected completion\n");

	if (status & XILINX_PCIE_INTR_SLV_COMPL)
		dev_warn(dev, "Slave completion timeout\n");

	if (status & XILINX_PCIE_INTR_SLV_ERRP)
		dev_warn(dev, "Slave Error Poison\n");

	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
		dev_warn(dev, "Slave Completer Abort\n");

	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
		dev_warn(dev, "Slave Illegal Burst\n");

	if (status & XILINX_PCIE_INTR_MST_DECERR)
		dev_warn(dev, "Master decode error\n");

	if (status & XILINX_PCIE_INTR_MST_SLVERR)
		dev_warn(dev, "Master slave error\n");

	if (status & XILINX_PCIE_INTR_MST_ERRP)
		dev_warn(dev, "Master error poison\n");

error:
	/* Clear the Interrupt Decode register */
	pcie_write(pcie, status, XILINX_PCIE_REG_IDR);

	return IRQ_HANDLED;
}
/**
 * xilinx_pcie_init_irq_domain - Initialize IRQ domain
 * @pcie: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *pcie_intc_node;
	int ret;

	/* Setup INTx: the interrupt controller is the first DT child node */
	pcie_intc_node = of_get_next_child(dev->of_node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						 &intx_domain_ops,
						 pcie);
	/* Drop the child-node reference taken above, success or not */
	of_node_put(pcie_intc_node);
	if (!pcie->leg_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	/* Setup MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * MSI doorbell address: 4K-aligned physical address of the
		 * port structure, same value xilinx_compose_msi_msg() uses.
		 */
		phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);

		ret = xilinx_allocate_msi_domains(pcie);
		if (ret)
			return ret;

		pcie_write(pcie, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
		pcie_write(pcie, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
	}

	return 0;
}
/**
* xilinx_pcie_init_port - Initialize hardware
2021-12-23 04:10:53 +03:00
* @ pcie : PCIe port information
2014-08-20 20:26:02 +04:00
*/
2021-12-23 04:10:53 +03:00
static void xilinx_pcie_init_port ( struct xilinx_pcie * pcie )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
struct device * dev = pcie - > dev ;
2016-10-06 21:44:42 +03:00
2021-12-23 04:10:53 +03:00
if ( xilinx_pcie_link_up ( pcie ) )
2016-10-06 21:44:42 +03:00
dev_info ( dev , " PCIe Link is UP \n " ) ;
2014-08-20 20:26:02 +04:00
else
2016-10-06 21:44:42 +03:00
dev_info ( dev , " PCIe Link is DOWN \n " ) ;
2014-08-20 20:26:02 +04:00
/* Disable all interrupts */
2021-12-23 04:10:53 +03:00
pcie_write ( pcie , ~ XILINX_PCIE_IDR_ALL_MASK ,
2014-08-20 20:26:02 +04:00
XILINX_PCIE_REG_IMR ) ;
/* Clear pending interrupts */
2021-12-23 04:10:53 +03:00
pcie_write ( pcie , pcie_read ( pcie , XILINX_PCIE_REG_IDR ) &
2014-08-20 20:26:02 +04:00
XILINX_PCIE_IMR_ALL_MASK ,
XILINX_PCIE_REG_IDR ) ;
2017-08-16 00:25:25 +03:00
/* Enable all interrupts we handle */
2021-12-23 04:10:53 +03:00
pcie_write ( pcie , XILINX_PCIE_IMR_ENABLE_MASK , XILINX_PCIE_REG_IMR ) ;
2014-08-20 20:26:02 +04:00
/* Enable the Bridge enable bit */
2021-12-23 04:10:53 +03:00
pcie_write ( pcie , pcie_read ( pcie , XILINX_PCIE_REG_RPSC ) |
2014-08-20 20:26:02 +04:00
XILINX_PCIE_REG_RPSC_BEN ,
XILINX_PCIE_REG_RPSC ) ;
}
/**
* xilinx_pcie_parse_dt - Parse Device tree
2021-12-23 04:10:53 +03:00
* @ pcie : PCIe port information
2014-08-20 20:26:02 +04:00
*
* Return : ' 0 ' on success and error value on failure
*/
2021-12-23 04:10:53 +03:00
static int xilinx_pcie_parse_dt ( struct xilinx_pcie * pcie )
2014-08-20 20:26:02 +04:00
{
2021-12-23 04:10:53 +03:00
struct device * dev = pcie - > dev ;
2014-08-20 20:26:02 +04:00
struct device_node * node = dev - > of_node ;
struct resource regs ;
2021-03-30 18:11:36 +03:00
unsigned int irq ;
2014-08-20 20:26:02 +04:00
int err ;
err = of_address_to_resource ( node , 0 , & regs ) ;
if ( err ) {
dev_err ( dev , " missing \" reg \" property \n " ) ;
return err ;
}
2021-12-23 04:10:53 +03:00
pcie - > reg_base = devm_pci_remap_cfg_resource ( dev , & regs ) ;
if ( IS_ERR ( pcie - > reg_base ) )
return PTR_ERR ( pcie - > reg_base ) ;
2014-08-20 20:26:02 +04:00
2021-03-30 18:11:36 +03:00
irq = irq_of_parse_and_map ( node , 0 ) ;
err = devm_request_irq ( dev , irq , xilinx_pcie_intr_handler ,
2015-12-10 22:18:20 +03:00
IRQF_SHARED | IRQF_NO_THREAD ,
2021-12-23 04:10:53 +03:00
" xilinx-pcie " , pcie ) ;
2014-08-20 20:26:02 +04:00
if ( err ) {
2021-03-30 18:11:36 +03:00
dev_err ( dev , " unable to request irq %d \n " , irq ) ;
2014-08-20 20:26:02 +04:00
return err ;
}
return 0 ;
}
/**
* xilinx_pcie_probe - Probe function
* @ pdev : Platform device pointer
*
* Return : ' 0 ' on success and error value on failure
*/
static int xilinx_pcie_probe ( struct platform_device * pdev )
{
struct device * dev = & pdev - > dev ;
2021-12-23 04:10:53 +03:00
struct xilinx_pcie * pcie ;
2017-06-28 23:13:59 +03:00
struct pci_host_bridge * bridge ;
2014-08-20 20:26:02 +04:00
int err ;
if ( ! dev - > of_node )
return - ENODEV ;
2021-12-23 04:10:53 +03:00
bridge = devm_pci_alloc_host_bridge ( dev , sizeof ( * pcie ) ) ;
2017-06-28 23:13:59 +03:00
if ( ! bridge )
return - ENODEV ;
2021-12-23 04:10:53 +03:00
pcie = pci_host_bridge_priv ( bridge ) ;
mutex_init ( & pcie - > map_lock ) ;
pcie - > dev = dev ;
2014-08-20 20:26:02 +04:00
2021-12-23 04:10:53 +03:00
err = xilinx_pcie_parse_dt ( pcie ) ;
2014-08-20 20:26:02 +04:00
if ( err ) {
dev_err ( dev , " Parsing DT failed \n " ) ;
return err ;
}
2021-12-23 04:10:53 +03:00
xilinx_pcie_init_port ( pcie ) ;
2014-08-20 20:26:02 +04:00
2021-12-23 04:10:53 +03:00
err = xilinx_pcie_init_irq_domain ( pcie ) ;
2014-08-20 20:26:02 +04:00
if ( err ) {
dev_err ( dev , " Failed creating IRQ Domain \n " ) ;
return err ;
}
2021-12-23 04:10:53 +03:00
bridge - > sysdata = pcie ;
2017-06-28 23:13:59 +03:00
bridge - > ops = & xilinx_pcie_ops ;
2014-11-12 01:45:31 +03:00
2021-03-30 18:11:36 +03:00
err = pci_host_probe ( bridge ) ;
if ( err )
2021-12-23 04:10:53 +03:00
xilinx_free_msi_domains ( pcie ) ;
2021-03-30 18:11:36 +03:00
return err ;
2014-08-20 20:26:02 +04:00
}
2017-06-20 08:47:48 +03:00
/* Device-tree match table for the AXI PCIe host bridge IP */
static const struct of_device_id xilinx_pcie_of_match[] = {
	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
	{}
};
static struct platform_driver xilinx_pcie_driver = {
	.driver = {
		.name = "xilinx-pcie",
		.of_match_table = xilinx_pcie_of_match,
		/* built-in driver: unbinding would orphan the PCI bus */
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pcie_probe,
};
/* Built-in only (no module support), registered at device_initcall time */
builtin_platform_driver(xilinx_pcie_driver);