// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011  Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "pcie-rcar.h"
struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
	struct rcar_pcie	pcie;
	struct device		*dev;
	struct phy		*phy;
	void __iomem		*base;
	struct list_head	resources;
	int			root_bus_nr;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
	int			(*phy_init_fn)(struct rcar_pcie_host *host);
};
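
/*
 * Config registers are read as full 32-bit dwords; shift the containing
 * dword so the requested byte/word lands in the low bits, and let the
 * caller mask the result down to the access size.
 */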
static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie_host *host,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ) {
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				host->root_bus_nr = *data & 0xff;

			rcar_pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}

	if (host->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (bus->parent->number == host->root_bus_nr)
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, *val);

	return ret;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie_host *host = bus->sysdata;
	unsigned int shift;
	u32 data;
	int ret;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
		bus->number, devfn, where, size, val);

	if (size == 1) {
		shift = BITS_PER_BYTE * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = BITS_PER_BYTE * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};
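
/*
 * Walk the host bridge windows: each I/O and memory window is handed to
 * rcar_pcie_set_outbound() for one outbound window slot, and the bus
 * resource supplies the root bus number.
 */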
static int rcar_pcie_setup(struct list_head *resource,
			   struct rcar_pcie_host *host)
{
	struct resource_entry *win;
	int i = 0;

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &host->resources) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_set_outbound(&host->pcie, i, win);
			i++;
			break;
		case IORESOURCE_BUS:
			host->root_bus_nr = res->start;
			break;
		default:
			continue;
		}

		pci_add_resource(resource, res);
	}

	return 1;
}
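
/*
 * The link trains at 2.5 GT/s first; if MACS2R reports 5 GT/s capability,
 * request a speed change through MACCTLR and poll MACSR (up to roughly
 * one second) for the change-finished flag.
 */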
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;

	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct resource_entry *win;
	LIST_HEAD(res);
	int i = 0;

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &host->resources) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_set_outbound(pcie, i, win);
			i++;
			break;
		}
	}
}
static int rcar_pcie_enable(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct rcar_pcie *pcie = &host->pcie;

	/* Try setting 5 GT/s link speed */
	rcar_pcie_force_speedup(pcie);

	rcar_pcie_setup(&bridge->windows, host);
	pci_add_flags(PCI_REASSIGN_ALL_BUS);

	bridge->sysdata = host;
	bridge->busnr = host->root_bus_nr;
	bridge->ops = &rcar_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	if (IS_ENABLED(CONFIG_PCI_MSI))
		bridge->msi = &host->msi.chip;

	return pci_host_probe(bridge);
}
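
/*
 * The H1 PHY is programmed through an indirect command interface: after a
 * command is issued, poll H1_PCIEPHYADRR for PHY_ACK (up to 100 * 100us).
 */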
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}
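
/*
 * Issue one indirect PHY register write: latch the data in
 * H1_PCIEPHYDOUTR, encode rate/lane/address together with WRITE_CMD into
 * H1_PCIEPHYADRR, then clear both registers once the write is acked.
 */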
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, u32 addr,
			  unsigned int lane, u32 data)
{
	u32 phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}
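
/*
 * Bring the controller out of reset into root complex mode (PCIEMSR = 1),
 * fix up the type 1 config header and capability lists, then set CFINIT
 * and wait for the data link to come up. A link-down result is returned
 * to the caller so probe can treat an empty slot gracefully.
 */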
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		   PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}
static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}
static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/*
	 * These settings come from the R-Car Series, 2nd Generation User's
	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
	 */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	/* The following value is for DC connection, no termination resistor */
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return 0;
}
static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
{
	int err;

	err = phy_init(host->phy);
	if (err)
		return err;

	err = phy_power_on(host->phy);
	if (err)
		phy_exit(host->phy);

	return err;
}
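
/*
 * MSI vectors are tracked in a bitmap of INT_PCI_MSI_NR bits under
 * msi->lock; the bit index doubles as the MSI data value written into
 * the endpoint's message (see rcar_msi_setup_irq() below).
 */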
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}
static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
{
	int msi;

	mutex_lock(&chip->lock);
	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
				      order_base_2(no_irqs));
	mutex_unlock(&chip->lock);

	return msi;
}
static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}
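
/*
 * Demultiplex the shared MSI interrupt: each set bit in PCIEMSIFR is a
 * pending vector. Every bit is cleared by writing it back before the
 * mapped virq is handled, and PCIEMSIFR is re-read until it is empty.
 */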
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie_host *host = data;
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	struct device *dev = pcie->dev;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int msi_irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		msi_irq = irq_find_mapping(msi->domain, index);
		if (msi_irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(msi_irq);
			else
				dev_info(dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}
static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
						   msi.chip);
	struct rcar_pcie *pcie = &host->pcie;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
static int rcar_msi_setup_irqs(struct msi_controller *chip,
			       struct pci_dev *pdev, int nvec, int type)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
						   msi.chip);
	struct rcar_pcie *pcie = &host->pcie;
	struct msi_desc *desc;
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	int i;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	hwirq = rcar_msi_alloc_region(msi, nvec);
	if (hwirq < 0)
		return -ENOSPC;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq)
		return -ENOSPC;

	for (i = 0; i < nvec; i++) {
		/*
		 * irq_create_mapping() called from rcar_pcie_probe()
		 * pre-allocates descs, so there is no need to allocate
		 * descs here. We can therefore assume that if
		 * irq_find_mapping() above returns non-zero, then the
		 * descs are also successfully allocated.
		 */
		if (irq_set_msi_desc_off(irq, i, desc)) {
			/* TODO: clear */
			return -EINVAL;
		}
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};
static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
{
	struct rcar_msi *msi = &host->msi;
	int i, irq;

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);
}
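
/*
 * Point the controller's MSI target at the page backing msi->pages and
 * unmask all 32 vectors. MSIFE in PCIEMSIALR enables address decoding;
 * the same register pair (minus MSIFE) forms the MSI address handed to
 * endpoints in rcar_msi_setup_irq()/rcar_msi_setup_irqs().
 */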
static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	unsigned long base;

	/* setup MSI data target */
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);

	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &host->msi;
	int err, i;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.setup_irqs = rcar_msi_setup_irqs;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	for (i = 0; i < INT_PCI_MSI_NR; i++)
		irq_create_mapping(msi->domain, i);

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_irq_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	rcar_pcie_hw_enable_msi(host);

	return 0;

err:
	rcar_pcie_unmap_msi(host);
	return err;
}
static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;

	/* Disable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/* Disable address decoding of the MSI interrupt, MSIFE */
	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);

	free_pages(msi->pages, 0);

	rcar_pcie_unmap_msi(host);
}
static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	host->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(host->phy))
		return PTR_ERR(host->phy);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	host->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(host->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		return PTR_ERR(host->bus_clk);
	}

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq1;
	}
	host->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq2;
	}
	host->msi.irq2 = i;

	return 0;

err_irq2:
	irq_dispose_mapping(host->msi.irq1);
err_irq1:
	return err;
}
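
/*
 * Cut one dma-range into hardware inbound windows. Each window is
 * power-of-two sized, naturally aligned at the CPU address, and at most
 * 4 GiB, so a large or misaligned range consumes multiple entries (each
 * entry occupies a register pair, hence idx advances by 2).
 */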
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct resource_entry *entry,
				    int *index)
{
	u64 restype = entry->res->flags;
	u64 cpu_addr = entry->res->start;
	u64 cpu_end = entry->res->end;
	u64 pci_addr = entry->res->start - entry->offset;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size = resource_size(entry->res);
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	while (cpu_addr < cpu_end) {
		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
		/*
		 * If the size of the range is larger than the alignment of
		 * the start address, we have to use multiple entries to
		 * perform the mapping.
		 */
		if (cpu_addr > 0) {
			unsigned long nr_zeros = __ffs64(cpu_addr);
			u64 alignment = 1ULL << nr_zeros;

			size = min(size, alignment);
		}
		/* Hardware supports max 4GiB inbound region */
		size = min(size, 1ULL << 32);

		mask = roundup_pow_of_two(size) - 1;
		mask &= ~0xf;

		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
				      lower_32_bits(mask) | flags, idx, true);

		pci_addr += size;
		cpu_addr += size;
		idx += 2;
	}
	*index = idx;

	return 0;
}
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
	struct resource_entry *entry;
	int index = 0, err = 0;

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
		if (err)
			break;
	}

	return err;
}
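
/*
 * The match data selects the per-SoC PHY init routine. A minimal, purely
 * illustrative device tree node (register address and required clock,
 * interrupt and ranges properties are SoC-specific and elided here):
 *
 *	pcie: pcie@fe000000 {
 *		compatible = "renesas,pcie-r8a7791", "renesas,pcie-rcar-gen2";
 *		reg = <0 0xfe000000 0 0x80000>;
 *		...
 *	};
 *
 * See the rcar-pci binding documentation for the authoritative format.
 */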
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};
static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_host *host;
	struct rcar_pcie *pcie;
	u32 data;
	int err;
	struct pci_host_bridge *bridge;

	bridge = pci_alloc_host_bridge(sizeof(*host));
	if (!bridge)
		return -ENOMEM;

	host = pci_host_bridge_priv(bridge);
	pcie = &host->pcie;
	pcie->dev = dev;
	platform_set_drvdata(pdev, host);

	err = pci_parse_request_of_pci_ranges(dev, &host->resources,
					      &bridge->dma_ranges, NULL);
	if (err)
		goto err_free_bridge;

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_disable;
	}

	err = rcar_pcie_get_resources(host);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	err = clk_prepare_enable(host->bus_clk);
	if (err) {
		dev_err(dev, "failed to enable bus clock: %d\n", err);
		goto err_unmap_msi_irqs;
	}

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		goto err_clk_disable;

	host->phy_init_fn = of_device_get_match_data(dev);
	err = host->phy_init_fn(host);
	if (err) {
		dev_err(dev, "failed to init PCIe PHY\n");
		goto err_clk_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	if (rcar_pcie_hw_init(pcie)) {
		dev_info(dev, "PCIe link down\n");
		err = -ENODEV;
		goto err_phy_shutdown;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(host);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_phy_shutdown;
		}
	}

	err = rcar_pcie_enable(host);
	if (err)
		goto err_msi_teardown;

	return 0;

err_msi_teardown:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_teardown_msi(host);

err_phy_shutdown:
	if (host->phy) {
		phy_power_off(host->phy);
		phy_exit(host->phy);
	}

err_clk_disable:
	clk_disable_unprepare(host->bus_clk);

err_unmap_msi_irqs:
	irq_dispose_mapping(host->msi.irq2);
	irq_dispose_mapping(host->msi.irq1);

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);
	pci_free_resource_list(&host->resources);

err_free_bridge:
	pci_free_host_bridge(bridge);

	return err;
}
static int __maybe_unused rcar_pcie_resume(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int data;
	int err;

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		return 0;

	/* Failure to get a link might just be that no cards are inserted */
	err = host->phy_init_fn(host);
	if (err) {
		dev_info(dev, "PCIe link down\n");
		return 0;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_hw_enable_msi(host);

	rcar_pcie_hw_enable(host);

	return 0;
}
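
/*
 * After system resume, skip re-initialization when PMSR reads back
 * non-zero and the data link is still up (DL_DOWN clear); otherwise
 * restore MACCTLR and set CFINIT again to retrain the link.
 */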
static int rcar_pcie_resume_noirq(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;

	if (rcar_pci_read_reg(pcie, PMSR) &&
	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
		return 0;

	/* Re-establish the PCIe link */
	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
	return rcar_pcie_wait_for_dl(pcie);
}

static const struct dev_pm_ops rcar_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
	.resume_noirq = rcar_pcie_resume_noirq,
};
static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.pm = &rcar_pcie_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);