// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host bridge driver for Apple system-on-chips.
 *
 * The HW is ECAM compliant, so once the controller is initialized,
 * the driver mostly deals with MSI mapping and handling of per-port
 * interrupts (INTx, management and error signals).
 *
 * Initialization requires enabling power and clocks, along with a
 * number of register pokes.
 *
 * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Copyright (C) 2021 Google LLC
 * Copyright (C) 2021 Corellium LLC
 * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
 *
 * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
 * Author: Marc Zyngier <maz@kernel.org>
 */
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
#define CORE_RC_PHYIF_CTL		0x00024
#define CORE_RC_PHYIF_CTL_RUN		BIT(0)
#define CORE_RC_PHYIF_STAT		0x00028
#define CORE_RC_PHYIF_STAT_REFCLK	BIT(4)
#define CORE_RC_CTL			0x00050
#define CORE_RC_CTL_RUN			BIT(0)
#define CORE_RC_STAT			0x00058
#define CORE_RC_STAT_READY		BIT(0)
#define CORE_FABRIC_STAT		0x04000
#define CORE_FABRIC_STAT_MASK		0x001F001F
#define CORE_LANE_CFG(port)		(0x84000 + 0x4000 * (port))
#define CORE_LANE_CFG_REFCLK0REQ	BIT(0)
#define CORE_LANE_CFG_REFCLK1REQ	BIT(1)
#define CORE_LANE_CFG_REFCLK0ACK	BIT(2)
#define CORE_LANE_CFG_REFCLK1ACK	BIT(3)
#define CORE_LANE_CFG_REFCLKEN		(BIT(9) | BIT(10))
#define CORE_LANE_CTL(port)		(0x84004 + 0x4000 * (port))
#define CORE_LANE_CTL_CFGACC		BIT(15)

#define PORT_LTSSMCTL			0x00080
#define PORT_LTSSMCTL_START		BIT(0)
#define PORT_INTSTAT			0x00100
#define PORT_INT_TUNNEL_ERR		31
#define PORT_INT_CPL_TIMEOUT		23
#define PORT_INT_RID2SID_MAPERR		22
#define PORT_INT_CPL_ABORT		21
#define PORT_INT_MSI_BAD_DATA		19
#define PORT_INT_MSI_ERR		18
#define PORT_INT_REQADDR_GT32		17
#define PORT_INT_AF_TIMEOUT		15
#define PORT_INT_LINK_DOWN		14
#define PORT_INT_LINK_UP		12
#define PORT_INT_LINK_BWMGMT		11
#define PORT_INT_AER_MASK		(15 << 4)
#define PORT_INT_PORT_ERR		4
#define PORT_INT_INTx(i)		i
#define PORT_INT_INTx_MASK		15
#define PORT_INTMSK			0x00104
#define PORT_INTMSKSET			0x00108
#define PORT_INTMSKCLR			0x0010c
#define PORT_MSICFG			0x00124
#define PORT_MSICFG_EN			BIT(0)
#define PORT_MSICFG_L2MSINUM_SHIFT	4
#define PORT_MSIBASE			0x00128
#define PORT_MSIBASE_1_SHIFT		16
#define PORT_MSIADDR			0x00168
#define PORT_LINKSTS			0x00208
#define PORT_LINKSTS_UP			BIT(0)
#define PORT_LINKSTS_BUSY		BIT(2)
#define PORT_LINKCMDSTS			0x00210
#define PORT_OUTS_NPREQS		0x00284
#define PORT_OUTS_NPREQS_REQ		BIT(24)
#define PORT_OUTS_NPREQS_CPL		BIT(16)
#define PORT_RXWR_FIFO			0x00288
#define PORT_RXWR_FIFO_HDR		GENMASK(15, 10)
#define PORT_RXWR_FIFO_DATA		GENMASK(9, 0)
#define PORT_RXRD_FIFO			0x0028C
#define PORT_RXRD_FIFO_REQ		GENMASK(6, 0)
#define PORT_OUTS_CPLS			0x00290
#define PORT_OUTS_CPLS_SHRD		GENMASK(14, 8)
#define PORT_OUTS_CPLS_WAIT		GENMASK(6, 0)
#define PORT_APPCLK			0x00800
#define PORT_APPCLK_EN			BIT(0)
#define PORT_APPCLK_CGDIS		BIT(8)
#define PORT_STATUS			0x00804
#define PORT_STATUS_READY		BIT(0)
#define PORT_REFCLK			0x00810
#define PORT_REFCLK_EN			BIT(0)
#define PORT_REFCLK_CGDIS		BIT(8)
#define PORT_PERST			0x00814
#define PORT_PERST_OFF			BIT(0)
#define PORT_RID2SID(i16)		(0x00828 + 4 * (i16))
#define PORT_RID2SID_VALID		BIT(31)
#define PORT_RID2SID_SID_SHIFT		16
#define PORT_RID2SID_BUS_SHIFT		8
#define PORT_RID2SID_DEV_SHIFT		3
#define PORT_RID2SID_FUNC_SHIFT		0
#define PORT_OUTS_PREQS_HDR		0x00980
#define PORT_OUTS_PREQS_HDR_MASK	GENMASK(9, 0)
#define PORT_OUTS_PREQS_DATA		0x00984
#define PORT_OUTS_PREQS_DATA_MASK	GENMASK(15, 0)
#define PORT_TUNCTRL			0x00988
#define PORT_TUNCTRL_PERST_ON		BIT(0)
#define PORT_TUNCTRL_PERST_ACK_REQ	BIT(1)
#define PORT_TUNSTAT			0x0098c
#define PORT_TUNSTAT_PERST_ON		BIT(0)
#define PORT_TUNSTAT_PERST_ACK_PEND	BIT(1)
#define PORT_PREFMEM_ENABLE		0x00994

#define MAX_RID2SID			64
/*
 * The doorbell address is set to 0xfffff000, which by convention
 * matches what MacOS does, and it is possible to use any other
 * address (in the bottom 4GB, as the base register is only 32bit).
 * However, it has to be excluded from the IOVA range, and the DART
 * driver has to know about it.
 */
#define DOORBELL_ADDR		CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
struct apple_pcie {
	struct mutex		lock;
	struct device		*dev;
	void __iomem		*base;
	struct irq_domain	*domain;
	unsigned long		*bitmap;
	struct list_head	ports;
	struct completion	event;
	struct irq_fwspec	fwspec;
	u32			nvecs;
};

struct apple_pcie_port {
	struct apple_pcie	*pcie;
	struct device_node	*np;
	void __iomem		*base;
	struct irq_domain	*domain;
	struct list_head	entry;
	DECLARE_BITMAP(sid_map, MAX_RID2SID);
	int			sid_map_sz;
	int			idx;
};
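
/* Read-modify-write helpers for the MMIO registers, using relaxed accessors */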
static void rmw_set(u32 set, void __iomem *addr)
{
	writel_relaxed(readl_relaxed(addr) | set, addr);
}

static void rmw_clear(u32 clr, void __iomem *addr)
{
	writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
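/*
 * MSI handling: MSIs are backed by a block of wired interrupts described by
 * the "msi-ranges" property. The top-level PCI/MSI domain sits on top of a
 * local allocator domain, which is in turn stacked on the parent wired
 * interrupt controller's domain.
 */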
static void apple_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void apple_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip apple_msi_top_chip = {
	.name			= "PCIe MSI",
	.irq_mask		= apple_msi_top_irq_mask,
	.irq_unmask		= apple_msi_top_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
};

static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(DOORBELL_ADDR);
	msg->address_lo = lower_32_bits(DOORBELL_ADDR);
	msg->data = data->hwirq;
}

static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};

static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct apple_pcie *pcie = domain->host_data;
	struct irq_fwspec fwspec = pcie->fwspec;
	unsigned int i;
	int ret, hwirq;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	fwspec.param[fwspec.param_count - 2] += hwirq;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &apple_msi_bottom_chip,
					      domain->host_data);
	}

	return 0;
}

static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct apple_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->lock);
	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&pcie->lock);
}

static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};

static struct msi_domain_info apple_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &apple_msi_top_chip,
};
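/*
 * Per-port interrupts (INTx, link management and error signals) share a
 * single input into the parent interrupt controller and are demultiplexed
 * through PORT_INTSTAT; masking and acking use the PORT_INTMSK{SET,CLR}
 * and PORT_INTSTAT registers.
 */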
static void apple_port_irq_mask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
}

static void apple_port_irq_unmask(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
}

static bool hwirq_is_intx(unsigned int hwirq)
{
	return BIT(hwirq) & PORT_INT_INTx_MASK;
}

static void apple_port_irq_ack(struct irq_data *data)
{
	struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);

	if (!hwirq_is_intx(data->hwirq))
		writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
}

static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
{
	/*
	 * It doesn't seem that there is any way to configure the
	 * trigger, so assume INTx have to be level (as per the spec),
	 * and the rest is edge (which looks likely).
	 */
	if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
		return -EINVAL;

	irqd_set_trigger_type(data, type);
	return 0;
}

static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};

static int apple_port_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	struct apple_pcie_port *port = domain->host_data;
	struct irq_fwspec *fwspec = args;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_flow_handler_t flow = handle_edge_irq;
		unsigned int type = IRQ_TYPE_EDGE_RISING;

		if (hwirq_is_intx(fwspec->param[0] + i)) {
			flow = handle_level_irq;
			type = IRQ_TYPE_LEVEL_HIGH;
		}

		irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
				    &apple_port_irqchip, port, flow,
				    NULL, NULL);

		irq_set_irq_type(virq + i, type);
	}

	return 0;
}

static void apple_port_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};
static void apple_port_irq_handler(struct irq_desc *desc)
{
	struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long stat;
	int i;

	chained_irq_enter(chip, desc);

	stat = readl_relaxed(port->base + PORT_INTSTAT);

	for_each_set_bit(i, &stat, 32)
		generic_handle_domain_irq(port->domain, i);

	chained_irq_exit(chip, desc);
}

static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	unsigned int irq;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSKSET);
	writel_relaxed(~0, port->base + PORT_INTSTAT);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);

	/* Enable MSIs, shared between all ports */
	writel_relaxed(0, port->base + PORT_MSIBASE);
	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
		       PORT_MSICFG_EN, port->base + PORT_MSICFG);

	return 0;
}
static irqreturn_t apple_pcie_port_irq(int irq, void *data)
{
	struct apple_pcie_port *port = data;
	unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;

	switch (hwirq) {
	case PORT_INT_LINK_UP:
		dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
				     port->np);
		complete_all(&port->pcie->event);
		break;
	case PORT_INT_LINK_DOWN:
		dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
				     port->np);
		break;
	default:
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
{
	static struct {
		unsigned int hwirq;
		const char *name;
	} port_irqs[] = {
		{ PORT_INT_LINK_UP,   "Link up",   },
		{ PORT_INT_LINK_DOWN, "Link down", },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
		struct irq_fwspec fwspec = {
			.fwnode		= &port->np->fwnode,
			.param_count	= 1,
			.param		= {
				[0]	= port_irqs[i].hwirq,
			},
		};
		unsigned int irq;
		int ret;

		irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
					    &fwspec);
		if (WARN_ON(!irq))
			continue;

		ret = request_irq(irq, apple_pcie_port_irq, 0,
				  port_irqs[i].name, port);
		WARN_ON(ret);
	}

	return 0;
}
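/*
 * Reference clock bring-up for a port: wait for the PHY refclk to be stable,
 * then run the REFCLK0/REFCLK1 request/ack handshake through the lane
 * configuration registers before enabling the per-port refclk.
 */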
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
					 stat & CORE_RC_PHYIF_STAT_REFCLK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));

	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK1ACK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));

	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);

	return 0;
}
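/* Write one entry of the Requester-ID to Stream-ID translation table */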
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
				    int idx, u32 val)
{
	writel_relaxed(val, port->base + PORT_RID2SID(idx));
	/* Read back to ensure completion of the write */
	return readl_relaxed(port->base + PORT_RID2SID(idx));
}
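/*
 * Bring up a single root port: enable the port clocks, sequence PERST#
 * around the refclk setup with the delays required by the PCIe specs,
 * set up the per-port interrupts and the RID/SID table, then start link
 * training and wait for the link-up event.
 */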
static int apple_pcie_setup_port(struct apple_pcie *pcie,
				 struct device_node *np)
{
	struct platform_device *platform = to_platform_device(pcie->dev);
	struct apple_pcie_port *port;
	struct gpio_desc *reset;
	u32 stat, idx;
	int ret, i;

	reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
				       GPIOD_OUT_LOW, "PERST#");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	ret = of_property_read_u32_index(np, "reg", 0, &idx);
	if (ret)
		return ret;

	/* Use the first reg entry to work out the port index */
	port->idx = idx >> 11;
	port->pcie = pcie;
	port->np = np;

	port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

	/* Assert PERST# before setting up the clock */
	gpiod_set_value(reset, 1);

	ret = apple_pcie_setup_refclk(pcie, port);
	if (ret < 0)
		return ret;

	/* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
	usleep_range(100, 200);

	/* Deassert PERST# */
	rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
	gpiod_set_value(reset, 0);

	/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
	msleep(100);

	ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
					 stat & PORT_STATUS_READY, 100, 250000);
	if (ret < 0) {
		dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
		return ret;
	}

	rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
	rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);

	ret = apple_pcie_port_setup_irq(port);
	if (ret)
		return ret;

	/* Reset all RID/SID mappings, and check for RAZ/WI registers */
	for (i = 0; i < MAX_RID2SID; i++) {
		if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
			break;
		apple_pcie_rid2sid_write(port, i, 0);
	}

	dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);

	port->sid_map_sz = i;

	list_add_tail(&port->entry, &pcie->ports);
	init_completion(&pcie->event);

	ret = apple_pcie_port_register_irqs(port);
	WARN_ON(ret);

	writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);

	if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
		dev_warn(pcie->dev, "%pOF link didn't come up\n", np);

	return 0;
}
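/*
 * Parse "msi-ranges" to find the block of wired interrupts backing MSIs,
 * then stack an allocator domain and the PCI/MSI domain on top of the
 * parent interrupt controller's domain.
 */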
static int apple_msi_init(struct apple_pcie *pcie)
{
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct of_phandle_args args = {};
	struct irq_domain *parent;
	int ret;

	ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
					 "#interrupt-cells", 0, &args);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
					 args.args_count + 1, &pcie->nvecs);
	if (ret)
		return ret;

	of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
				  &pcie->fwspec);

	pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
	if (!pcie->bitmap)
		return -ENOMEM;

	parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
	if (!parent) {
		dev_err(pcie->dev, "failed to find parent domain\n");
		return -ENXIO;
	}

	parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
					     &apple_msi_domain_ops, pcie);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
						 parent);
	if (!pcie->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}
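/*
 * RID/SID translation is programmed per endpoint: a PCI bus notifier maps
 * each device's Requester ID to its DART Stream ID (taken from "iommu-map")
 * in the root port's translation table as devices are added and removed.
 */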
static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
{
	struct pci_config_window *cfg = pdev->sysdata;
	struct apple_pcie *pcie = cfg->priv;
	struct pci_dev *port_pdev;
	struct apple_pcie_port *port;

	/* Find the root port this device is on */
	port_pdev = pcie_find_root_port(pdev);

	/* If finding the port itself, nothing to do */
	if (WARN_ON(!port_pdev) || pdev == port_pdev)
		return NULL;

	list_for_each_entry(port, &pcie->ports, entry) {
		if (port->idx == PCI_SLOT(port_pdev->devfn))
			return port;
	}

	return NULL;
}

static int apple_pcie_add_device(struct apple_pcie_port *port,
				 struct pci_dev *pdev)
{
	u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
	int idx, err;

	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
		pci_name(pdev->bus->self), port->idx);

	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
			"iommu-map-mask", NULL, &sid);
	if (err)
		return err;

	mutex_lock(&port->pcie->lock);

	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
	if (idx >= 0) {
		apple_pcie_rid2sid_write(port, idx,
					 PORT_RID2SID_VALID |
					 (sid << PORT_RID2SID_SID_SHIFT) | rid);

		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
			rid, sid, idx);
	}

	mutex_unlock(&port->pcie->lock);

	return idx >= 0 ? 0 : -ENOSPC;
}

static void apple_pcie_release_device(struct apple_pcie_port *port,
				      struct pci_dev *pdev)
{
	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
	int idx;

	mutex_lock(&port->pcie->lock);

	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
		u32 val;

		val = readl_relaxed(port->base + PORT_RID2SID(idx));
		if ((val & 0xffff) == rid) {
			apple_pcie_rid2sid_write(port, idx, 0);
			bitmap_release_region(port->sid_map, idx, 0);
			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
			break;
		}
	}

	mutex_unlock(&port->pcie->lock);
}

static int apple_pcie_bus_notifier(struct notifier_block *nb,
				   unsigned long action,
				   void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct apple_pcie_port *port;
	int err;

	/*
	 * This is a bit ugly. We assume that if we get notified for
	 * any PCI device, we must be in charge of it, and that there
	 * is no other PCI controller in the whole system. It probably
	 * holds for now, but who knows for how long?
	 */
	port = apple_pcie_get_port(pdev);
	if (!port)
		return NOTIFY_DONE;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		err = apple_pcie_add_device(port, pdev);
		if (err)
			return notifier_from_errno(err);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		apple_pcie_release_device(port, pdev);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block apple_pcie_nb = {
	.notifier_call = apple_pcie_bus_notifier,
};
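/*
 * ECAM init hook (pci_ecam_ops.init): map the core registers, bring up each
 * root port described in the device tree, then set up the MSI domains.
 */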
static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *platform = to_platform_device(dev);
	struct device_node *of_port;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	mutex_init(&pcie->lock);

	pcie->base = devm_platform_ioremap_resource(platform, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	cfg->priv = pcie;
	INIT_LIST_HEAD(&pcie->ports);

	for_each_child_of_node(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
			of_node_put(of_port);
			return ret;
		}
	}

	return apple_msi_init(pcie);
}
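/*
 * The bus notifier is registered before probing the host bridge so that
 * every device enumerated during probe gets its RID/SID mapping installed.
 */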
static int apple_pcie_probe(struct platform_device *pdev)
{
	int ret;

	ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
	if (ret)
		return ret;

	ret = pci_host_common_probe(pdev);
	if (ret)
		bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);

	return ret;
}
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);

static struct platform_driver apple_pcie_driver = {
	.probe	= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);

MODULE_LICENSE("GPL v2");