// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011  Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "pcie-rcar.h"

struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex map_lock;
	spinlock_t mask_lock;
	int irq1;
	int irq2;
};

#ifdef CONFIG_ARM
/*
 * Here we keep a static copy of the remapped PCIe controller address.
 * This is only used on aarch32 systems, all of which have one single
 * PCIe controller, to provide quick access to the PCIe controller in
 * the L1 link state fixup function, called from the ARM fault handler.
 */
static void __iomem *pcie_base;
/*
 * Static copy of PCIe device pointer, so we can check whether the
 * device is runtime suspended or not.
 */
static struct device *pcie_dev;
#endif

/* Structure representing the PCIe interface */
struct rcar_pcie_host {
	struct rcar_pcie	pcie;
	struct phy		*phy;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
	int			(*phy_init_fn)(struct rcar_pcie_host *host);
};

static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
{
	return container_of(msi, struct rcar_pcie_host, msi);
}
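
/*
 * Config space registers are only accessible as aligned 32-bit words:
 * read the containing word and shift the addressed byte/halfword down
 * into the low-order bits for the caller.
 */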
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie_host *host,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ)
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		else
			rcar_pci_write_reg(pcie, *data, PCICONF(index));

		return PCIBIOS_SUCCESSFUL;
	}

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (pci_is_root_bus(bus->parent))
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}

static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (size == 1)
		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, *val);

	return ret;
}
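
/*
 * The hardware config interface transfers whole 32-bit words only, so
 * byte and halfword writes are implemented as a read-modify-write of
 * the containing aligned word.
 */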
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	unsigned int shift;
	u32 data;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, val);

	if (size == 1) {
		shift = BITS_PER_BYTE * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = BITS_PER_BYTE * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};
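
/*
 * The link trains at 2.5 GT/s initially. If MACS2R reports that 5 GT/s
 * is supported, request a speed change through MACCTLR, after clearing
 * any stale speed-change status bits, and poll MACSR for completion.
 */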
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;

	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
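
/*
 * Attempt the 5 GT/s speed-up and program an outbound window for each
 * I/O and memory resource of the host bridge; used both at probe time
 * and when restoring state on resume.
 */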
static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *win;
	LIST_HEAD(res);
	int i = 0;

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &bridge->windows) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_set_outbound(pcie, i, win);
			i++;
			break;
		}
	}
}

static int rcar_pcie_enable(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);

	rcar_pcie_hw_enable(host);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);

	bridge->sysdata = host;
	bridge->ops = &rcar_pcie_ops;

	return pci_host_probe(bridge);
}

static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}
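
/*
 * Indirect PHY register write: load the data register, issue a
 * WRITE_CMD with rate/lane/address encoded into H1_PCIEPHYADRR, wait
 * for PHY_ACK, then clear the command registers (again waiting for the
 * ack of the clear).
 */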
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, u32 addr,
			  unsigned int lane, u32 data)
{
	u32 phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}
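
/*
 * Bring the controller up as a root complex: set root-port mode, wait
 * for the PHY, populate the bridge config header and PCIe capabilities,
 * then request link training via CFINIT and wait for the data link
 * layer to come up.
 */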
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		   PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}

static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}

static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/* The following value is for DC connection, no termination resistor */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return 0;
}

static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
{
	int err;

	err = phy_init(host->phy);
	if (err)
		return err;

	err = phy_power_on(host->phy);
	if (err)
		phy_exit(host->phy);

	return err;
}
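
/*
 * MSI and INTx share the controller interrupt lines; PCIEMSIFR reports
 * which MSI vectors are pending. Dispatch each set bit to the inner
 * (bottom) IRQ domain, clearing unclaimed vectors, and re-read the
 * register until it drains.
 */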
2014-05-12 14:57:49 +04:00
static irqreturn_t rcar_pcie_msi_irq ( int irq , void * data )
{
2020-05-07 15:33:13 +03:00
struct rcar_pcie_host * host = data ;
struct rcar_pcie * pcie = & host - > pcie ;
struct rcar_msi * msi = & host - > msi ;
2016-10-10 22:31:28 +03:00
struct device * dev = pcie - > dev ;
2014-05-12 14:57:49 +04:00
unsigned long reg ;
2014-06-30 11:54:23 +04:00
reg = rcar_pci_read_reg ( pcie , PCIEMSIFR ) ;
2014-05-12 14:57:49 +04:00
/* MSI & INTx share an interrupt - we only handle MSI here */
if ( ! reg )
return IRQ_NONE ;
while ( reg ) {
unsigned int index = find_first_bit ( & reg , 32 ) ;
2021-08-02 19:26:19 +03:00
int ret ;
2014-05-12 14:57:49 +04:00
2021-08-02 19:26:19 +03:00
ret = generic_handle_domain_irq ( msi - > domain - > parent , index ) ;
if ( ret ) {
2014-05-12 14:57:49 +04:00
/* Unknown MSI, just clear it */
2016-10-10 22:31:28 +03:00
dev_dbg ( dev , " unexpected MSI \n " ) ;
2021-03-30 18:11:34 +03:00
rcar_pci_write_reg ( pcie , BIT ( index ) , PCIEMSIFR ) ;
2014-05-12 14:57:49 +04:00
}
/* see if there's any more pending in this vector */
2014-06-30 11:54:23 +04:00
reg = rcar_pci_read_reg ( pcie , PCIEMSIFR ) ;
2014-05-12 14:57:49 +04:00
}
return IRQ_HANDLED ;
}

static void rcar_msi_top_irq_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void rcar_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void rcar_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip rcar_msi_top_chip = {
	.name		= "PCIe MSI",
	.irq_ack	= rcar_msi_top_irq_ack,
	.irq_mask	= rcar_msi_top_irq_mask,
	.irq_unmask	= rcar_msi_top_irq_unmask,
};

static void rcar_msi_irq_ack(struct irq_data *d)
{
	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;

	/* clear the interrupt */
	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
}

static void rcar_msi_irq_mask(struct irq_data *d)
{
	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
	value &= ~BIT(d->hwirq);
	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static void rcar_msi_irq_unmask(struct irq_data *d)
{
	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&msi->mask_lock, flags);
	value = rcar_pci_read_reg(pcie, PCIEMSIIER);
	value |= BIT(d->hwirq);
	rcar_pci_write_reg(pcie, value, PCIEMSIIER);
	spin_unlock_irqrestore(&msi->mask_lock, flags);
}

static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;

	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg->data = data->hwirq;
}

static struct irq_chip rcar_msi_bottom_chip = {
	.name			= "Rcar MSI",
	.irq_ack		= rcar_msi_irq_ack,
	.irq_mask		= rcar_msi_irq_mask,
	.irq_unmask		= rcar_msi_irq_unmask,
	.irq_set_affinity	= rcar_msi_set_affinity,
	.irq_compose_msi_msg	= rcar_compose_msi_msg,
};

static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct rcar_msi *msi = domain->host_data;
	unsigned int i;
	int hwirq;

	mutex_lock(&msi->map_lock);

	hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &rcar_msi_bottom_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);

	return 0;
}

static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct rcar_msi *msi = domain->host_data;

	mutex_lock(&msi->map_lock);

	bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&msi->map_lock);
}

static const struct irq_domain_ops rcar_msi_domain_ops = {
	.alloc	= rcar_msi_domain_alloc,
	.free	= rcar_msi_domain_free,
};

static struct msi_domain_info rcar_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &rcar_msi_top_chip,
};
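
/*
 * MSI setup uses a two-level hierarchy: a linear "bottom" domain owns
 * the INT_PCI_MSI_NR hardware vectors and programs the controller
 * registers, while the pci_msi "top" domain stacked on it provides the
 * irq_chip seen by client drivers.
 */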
static int rcar_allocate_domains(struct rcar_msi *msi)
{
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain *parent;

	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
					  &rcar_msi_domain_ops, msi);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}

static void rcar_free_domains(struct rcar_msi *msi)
{
	struct irq_domain *parent = msi->domain->parent;

	irq_domain_remove(msi->domain);
	irq_domain_remove(parent);
}
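
/*
 * Set up the MSI capture address and request the two controller IRQs.
 * Both IRQs are shared with INTx sources, hence IRQF_SHARED and the
 * IRQ_NONE path in the handler above.
 */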
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &host->msi;
	struct resource res;
	int err;

	mutex_init(&msi->map_lock);
	spin_lock_init(&msi->mask_lock);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	err = rcar_allocate_domains(msi);
	if (err)
		return err;

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_bottom_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_bottom_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* disable all MSIs */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/*
	 * Setup MSI data target using RC base address, which is guaranteed
	 * to be in the low 32-bit range on any R-Car HW.
	 */
	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);

	return 0;

err:
	rcar_free_domains(msi);
	return err;
}

static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Disable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/* Disable address decoding of the MSI interrupt, MSIFE */
	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);

	rcar_free_domains(&host->msi);
}

static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	host->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(host->phy))
		return PTR_ERR(host->phy);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	host->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(host->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		return PTR_ERR(host->bus_clk);
	}

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq1;
	}
	host->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq2;
	}
	host->msi.irq2 = i;

#ifdef CONFIG_ARM
	/* Cache static copy for L1 link state fixup hook on aarch32 */
	pcie_base = pcie->base;
	pcie_dev = pcie->dev;
#endif

	return 0;

err_irq2:
	irq_dispose_mapping(host->msi.irq1);
err_irq1:
	return err;
}
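
/*
 * Inbound (dma-ranges) windows must be naturally aligned and the
 * hardware caps each window at 4 GiB, so a single range may have to be
 * split across several window index pairs.
 */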
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct resource_entry *entry,
				    int *index)
{
	u64 restype = entry->res->flags;
	u64 cpu_addr = entry->res->start;
	u64 cpu_end = entry->res->end;
	u64 pci_addr = entry->res->start - entry->offset;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size = resource_size(entry->res);
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	while (cpu_addr < cpu_end) {
		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
		/*
		 * If the size of the range is larger than the alignment of
		 * the start address, we have to use multiple entries to
		 * perform the mapping.
		 */
		if (cpu_addr > 0) {
			unsigned long nr_zeros = __ffs64(cpu_addr);
			u64 alignment = 1ULL << nr_zeros;

			size = min(size, alignment);
		}
		/* Hardware supports max 4GiB inbound region */
		size = min(size, 1ULL << 32);

		mask = roundup_pow_of_two(size) - 1;
		mask &= ~0xf;

		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
				      lower_32_bits(mask) | flags, idx, true);

		pci_addr += size;
		cpu_addr += size;
		idx += 2;
	}
	*index = idx;

	return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *entry;
	int index = 0, err = 0;

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
		if (err)
			break;
	}

	return err;
}

static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};

static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_host *host;
	struct rcar_pcie *pcie;
	u32 data;
	int err;
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
	if (!bridge)
		return -ENOMEM;

	host = pci_host_bridge_priv(bridge);
	pcie = &host->pcie;
	pcie->dev = dev;
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_put;
	}

	err = rcar_pcie_get_resources(host);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	err = clk_prepare_enable(host->bus_clk);
	if (err) {
		dev_err(dev, "failed to enable bus clock: %d\n", err);
		goto err_unmap_msi_irqs;
	}

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		goto err_clk_disable;

	host->phy_init_fn = of_device_get_match_data(dev);
	err = host->phy_init_fn(host);
	if (err) {
		dev_err(dev, "failed to init PCIe PHY\n");
		goto err_clk_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	if (rcar_pcie_hw_init(pcie)) {
		dev_info(dev, "PCIe link down\n");
		err = -ENODEV;
		goto err_phy_shutdown;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(host);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_phy_shutdown;
		}
	}

	err = rcar_pcie_enable(host);
	if (err)
		goto err_msi_teardown;

	return 0;

err_msi_teardown:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_teardown_msi(host);

err_phy_shutdown:
	if (host->phy) {
		phy_power_off(host->phy);
		phy_exit(host->phy);
	}

err_clk_disable:
	clk_disable_unprepare(host->bus_clk);

err_unmap_msi_irqs:
	irq_dispose_mapping(host->msi.irq2);
	irq_dispose_mapping(host->msi.irq1);

err_pm_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return err;
}

static int __maybe_unused rcar_pcie_resume(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int data;
	int err;

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		return 0;

	/* Failure to get a link might just be that no cards are inserted */
	err = host->phy_init_fn(host);
	if (err) {
		dev_info(dev, "PCIe link down\n");
		return 0;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct resource res;
		u32 val;

		of_address_to_resource(dev->of_node, 0, &res);
		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);

		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
	}

	rcar_pcie_hw_enable(host);

	return 0;
}
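
/*
 * If the link survived suspend (PMSR is still readable and PCIETCTLR
 * does not report DL_DOWN), there is nothing to do. Otherwise,
 * presumably because the controller lost power, re-establish the link
 * before later resume callbacks start touching config space.
 */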
static int rcar_pcie_resume_noirq(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;

	if (rcar_pci_read_reg(pcie, PMSR) &&
	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
		return 0;

	/* Re-establish the PCIe link */
	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
	return rcar_pcie_wait_for_dl(pcie);
}

static const struct dev_pm_ops rcar_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
	.resume_noirq = rcar_pcie_resume_noirq,
};

static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.pm = &rcar_pcie_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};

#ifdef CONFIG_ARM
static DEFINE_SPINLOCK(pmsr_lock);
static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long flags;
	u32 pmsr, val;
	int ret = 0;

	spin_lock_irqsave(&pmsr_lock, flags);

	if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
		ret = 1;
		goto unlock_exit;
	}

	pmsr = readl(pcie_base + PMSR);

	/*
	 * Test if the PCIe controller received PM_ENTER_L1 DLLP and
	 * the PCIe controller is not in L1 link state. If true, apply
	 * fix, which will put the controller into L1 link state, from
	 * which it can return to L0s/L0 on its own.
	 */
	if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
		writel(L1IATN, pcie_base + PMCTLR);
		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
						val & L1FAEG, 10, 1000);
		WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
	}

unlock_exit:
	spin_unlock_irqrestore(&pmsr_lock, flags);
	return ret;
}

static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
	{ .compatible = "renesas,pcie-r8a7779" },
	{ .compatible = "renesas,pcie-r8a7790" },
	{ .compatible = "renesas,pcie-r8a7791" },
	{ .compatible = "renesas,pcie-rcar-gen2" },
	{},
};
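
/*
 * On aarch32, accesses that the controller aborts (e.g. while the link
 * is transitioning to L1) surface as external aborts: fault code 17
 * ("asynchronous external abort") with LPAE, fault code 22 ("imprecise
 * external abort") without, so hook the fault handler only on SoCs
 * this driver actually matches.
 */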
static int __init rcar_pcie_init(void)
{
	if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
#ifdef CONFIG_ARM_LPAE
		hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
				"asynchronous external abort");
#else
		hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
				"imprecise external abort");
#endif
	}

	return platform_driver_register(&rcar_pcie_driver);
}
device_initcall(rcar_pcie_init);
#else
builtin_platform_driver(rcar_pcie_driver);
#endif