2018-01-26 21:50:27 +03:00
// SPDX-License-Identifier: GPL-2.0
2014-05-12 14:57:48 +04:00
/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011 Paul Mundt
 *
 * Author: Phil Edworthy <phil.edworthy@renesas.com>
 */
2018-04-08 21:04:31 +03:00
# include <linux/bitops.h>
2014-05-12 14:57:48 +04:00
# include <linux/clk.h>
# include <linux/delay.h>
# include <linux/interrupt.h>
2014-05-12 14:57:49 +04:00
# include <linux/irq.h>
# include <linux/irqdomain.h>
2014-05-12 14:57:48 +04:00
# include <linux/kernel.h>
2016-07-23 00:23:21 +03:00
# include <linux/init.h>
2014-05-12 14:57:49 +04:00
# include <linux/msi.h>
2014-05-12 14:57:48 +04:00
# include <linux/of_address.h>
# include <linux/of_irq.h>
# include <linux/of_pci.h>
# include <linux/of_platform.h>
# include <linux/pci.h>
2018-05-03 22:40:54 +03:00
# include <linux/phy/phy.h>
2014-05-12 14:57:48 +04:00
# include <linux/platform_device.h>
2016-01-05 16:00:30 +03:00
# include <linux/pm_runtime.h>
2014-05-12 14:57:48 +04:00
# include <linux/slab.h>
2018-05-11 20:15:30 +03:00
# include "../pci.h"
2014-05-12 14:57:48 +04:00
# define PCIECAR 0x000010
# define PCIECCTLR 0x000018
2018-04-08 21:04:31 +03:00
# define CONFIG_SEND_ENABLE BIT(31)
2014-05-12 14:57:48 +04:00
# define TYPE0 (0 << 8)
2018-04-08 21:04:31 +03:00
# define TYPE1 BIT(8)
2014-05-12 14:57:48 +04:00
# define PCIECDR 0x000020
# define PCIEMSR 0x000028
# define PCIEINTXR 0x000400
2018-05-03 22:36:37 +03:00
# define PCIEPHYSR 0x0007f0
# define PHYRDY BIT(0)
2014-05-12 14:57:49 +04:00
# define PCIEMSITXR 0x000840
2014-05-12 14:57:48 +04:00
/* Transfer control */
# define PCIETCTLR 0x02000
2019-03-25 14:40:56 +03:00
# define CFINIT BIT(0)
2014-05-12 14:57:48 +04:00
# define PCIETSTR 0x02004
2019-03-25 14:40:56 +03:00
# define DATA_LINK_ACTIVE BIT(0)
2014-05-12 14:57:48 +04:00
# define PCIEERRFR 0x02020
2018-04-08 21:04:31 +03:00
# define UNSUPPORTED_REQUEST BIT(4)
2014-05-12 14:57:49 +04:00
# define PCIEMSIFR 0x02044
# define PCIEMSIALR 0x02048
2019-03-25 14:40:56 +03:00
# define MSIFE BIT(0)
2014-05-12 14:57:49 +04:00
# define PCIEMSIAUR 0x0204c
# define PCIEMSIIER 0x02050
2014-05-12 14:57:48 +04:00
/* root port address */
# define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
/* local address reg & mask */
# define PCIELAR(x) (0x02200 + ((x) * 0x20))
# define PCIELAMR(x) (0x02208 + ((x) * 0x20))
2018-04-08 21:04:31 +03:00
# define LAM_PREFETCH BIT(3)
# define LAM_64BIT BIT(2)
# define LAR_ENABLE BIT(1)
2014-05-12 14:57:48 +04:00
/* PCIe address reg & mask */
2015-02-04 12:02:55 +03:00
# define PCIEPALR(x) (0x03400 + ((x) * 0x20))
# define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
2014-05-12 14:57:48 +04:00
# define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
# define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
2018-04-08 21:04:31 +03:00
# define PAR_ENABLE BIT(31)
# define IO_SPACE BIT(8)
2014-05-12 14:57:48 +04:00
/* Configuration */
# define PCICONF(x) (0x010000 + ((x) * 0x4))
# define PMCAP(x) (0x010040 + ((x) * 0x4))
# define EXPCAP(x) (0x010070 + ((x) * 0x4))
# define VCCAP(x) (0x010100 + ((x) * 0x4))
/* link layer */
# define IDSETR1 0x011004
# define TLCTLR 0x011048
# define MACSR 0x011054
2018-04-08 21:04:31 +03:00
# define SPCHGFIN BIT(4)
# define SPCHGFAIL BIT(6)
# define SPCHGSUC BIT(7)
2016-09-22 23:20:18 +03:00
# define LINK_SPEED (0xf << 16)
# define LINK_SPEED_2_5GTS (1 << 16)
# define LINK_SPEED_5_0GTS (2 << 16)
2014-05-12 14:57:48 +04:00
# define MACCTLR 0x011058
2018-04-08 21:04:31 +03:00
# define SPEED_CHANGE BIT(24)
# define SCRAMBLE_DISABLE BIT(27)
2016-09-22 23:20:18 +03:00
# define MACS2R 0x011078
# define MACCGSPSETR 0x011084
2018-04-08 21:04:31 +03:00
# define SPCNGRSN BIT(31)
2014-05-12 14:57:48 +04:00
/* R-Car H1 PHY */
# define H1_PCIEPHYADRR 0x04000c
2018-04-08 21:04:31 +03:00
# define WRITE_CMD BIT(16)
# define PHY_ACK BIT(24)
2014-05-12 14:57:48 +04:00
# define RATE_POS 12
# define LANE_POS 8
# define ADR_POS 0
# define H1_PCIEPHYDOUTR 0x040014
2016-01-05 16:00:31 +03:00
/* R-Car Gen2 PHY */
# define GEN2_PCIEPHYADDR 0x780
# define GEN2_PCIEPHYDATA 0x784
# define GEN2_PCIEPHYCTRL 0x78c
2018-04-08 21:04:31 +03:00
# define INT_PCI_MSI_NR 32
2014-05-12 14:57:49 +04:00
2018-04-08 21:04:31 +03:00
# define RCONF(x) (PCICONF(0) + (x))
# define RPMCAP(x) (PMCAP(0) + (x))
# define REXPCAP(x) (EXPCAP(0) + (x))
# define RVCCAP(x) (VCCAP(0) + (x))
2014-05-12 14:57:48 +04:00
2018-04-08 21:04:31 +03:00
# define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
# define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
# define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
2014-05-12 14:57:48 +04:00
2018-04-08 21:04:31 +03:00
# define RCAR_PCI_MAX_RESOURCES 4
# define MAX_NR_INBOUND_MAPS 6
2014-05-12 14:57:48 +04:00
2014-05-12 14:57:49 +04:00
/* Per-controller MSI state. */
struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated MSI vectors */
	struct irq_domain *domain;		/* hwirq <-> virq mapping */
	struct msi_controller chip;		/* legacy MSI controller hooks */
	unsigned long pages;			/* MSI data target page (virtual address) */
	struct mutex lock;			/* protects the 'used' bitmap */
	int irq1;				/* first IRQ line (shared MSI/INTx) */
	int irq2;				/* second IRQ line (shared MSI/INTx) */
};
2014-11-12 03:45:45 +03:00
static inline struct rcar_msi * to_rcar_msi ( struct msi_controller * chip )
2014-05-12 14:57:49 +04:00
{
return container_of ( chip , struct rcar_msi , chip ) ;
}
2014-05-12 14:57:48 +04:00
/* Structure representing the PCIe interface */
struct rcar_pcie {
	struct device *dev;		/* platform device backing this port */
	struct phy *phy;		/* generic PHY (Gen3 only, may be NULL) */
	void __iomem *base;		/* mapped controller registers */
	struct list_head resources;	/* host bridge windows from DT */
	int root_bus_nr;		/* root bus number, <0 until configured */
	struct clk *bus_clk;		/* PCIe bus clock */
	struct rcar_msi msi;		/* MSI controller state */
};
2019-03-25 14:40:57 +03:00
/* Write a 32-bit value to a controller register. */
static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val,
			       unsigned int reg)
{
	writel(val, pcie->base + reg);
}
2019-03-25 14:40:57 +03:00
static u32 rcar_pci_read_reg ( struct rcar_pcie * pcie , unsigned int reg )
2014-05-12 14:57:48 +04:00
{
return readl ( pcie - > base + reg ) ;
}
/* Direction of a config-space access in rcar_pcie_config_access(). */
enum {
	RCAR_PCI_ACCESS_READ,	/* read device config into *data */
	RCAR_PCI_ACCESS_WRITE,	/* write *data to device config */
};
/*
 * Read-modify-write a byte-aligned field inside the naturally aligned
 * 32-bit register containing 'where'.
 */
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	unsigned int aligned = where & ~3;
	u32 val;

	val = rcar_pci_read_reg(pcie, aligned);
	val = (val & ~(mask << shift)) | (data << shift);
	rcar_pci_write_reg(pcie, val, aligned);
}
static u32 rcar_read_conf ( struct rcar_pcie * pcie , int where )
{
2019-03-25 14:40:59 +03:00
unsigned int shift = BITS_PER_BYTE * ( where & 3 ) ;
2014-06-30 11:54:23 +04:00
u32 val = rcar_pci_read_reg ( pcie , where & ~ 3 ) ;
2014-05-12 14:57:48 +04:00
return val > > shift ;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
/*
 * Perform one 32-bit config-space access (read or write, selected by
 * access_type). Root-bus accesses are routed to the controller's own
 * PCICONF registers; everything else goes through the PCIECAR/PCIECDR
 * indirect access window. Returns a PCIBIOS_* status code.
 */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	unsigned int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;	/* dword-aligned register offset */
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller initiated target transfer to its own config space
	 * result in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ) {
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				pcie->root_bus_nr = *data & 0xff;

			rcar_pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}

	/* Downstream access before the root bus number is known is invalid */
	if (pcie->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (bus->parent->number == pcie->root_bus_nr)
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}
/*
 * pci_ops .read hook: read size bytes at 'where' for (bus, devfn).
 * Returns all-ones in *val on failure, as the PCI core expects.
 */
static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	/* Extract the requested byte/word from the 32-bit dword */
	switch (size) {
	case 1:
		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
		break;
	case 2:
		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
		break;
	default:
		break;
	}

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)*val);

	return ret;
}
/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
/*
 * pci_ops .write hook: read-modify-write the containing dword so that
 * byte and word writes do not clobber neighbouring config registers.
 */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	unsigned int shift;
	u32 data;
	int ret;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)val);

	switch (size) {
	case 1:
		shift = BITS_PER_BYTE * (where & 3);
		data &= ~(0xff << shift);
		data |= (val & 0xff) << shift;
		break;
	case 2:
		shift = BITS_PER_BYTE * (where & 2);
		data &= ~(0xffff << shift);
		data |= (val & 0xffff) << shift;
		break;
	default:
		data = val;
		break;
	}

	return rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
				       bus, devfn, where, &data);
}
/* Config-space accessors handed to the generic PCI core. */
static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};
2015-11-25 18:30:37 +03:00
static void rcar_pcie_setup_window ( int win , struct rcar_pcie * pcie ,
struct resource * res )
2014-05-12 14:57:48 +04:00
{
/* Setup PCIe address space mappings for each resource */
resource_size_t size ;
2014-09-29 18:29:25 +04:00
resource_size_t res_start ;
2014-05-12 14:57:48 +04:00
u32 mask ;
2014-06-30 11:54:23 +04:00
rcar_pci_write_reg ( pcie , 0x00000000 , PCIEPTCTLR ( win ) ) ;
2014-05-12 14:57:48 +04:00
/*
* The PAMR mask is calculated in units of 128 Bytes , which
* keeps things pretty simple .
*/
size = resource_size ( res ) ;
mask = ( roundup_pow_of_two ( size ) / SZ_128 ) - 1 ;
2014-06-30 11:54:23 +04:00
rcar_pci_write_reg ( pcie , mask < < 7 , PCIEPAMR ( win ) ) ;
2014-05-12 14:57:48 +04:00
2014-09-29 18:29:25 +04:00
if ( res - > flags & IORESOURCE_IO )
res_start = pci_pio_to_address ( res - > start ) ;
else
res_start = res - > start ;
2015-02-04 12:02:55 +03:00
rcar_pci_write_reg ( pcie , upper_32_bits ( res_start ) , PCIEPAUR ( win ) ) ;
2015-02-02 08:09:58 +03:00
rcar_pci_write_reg ( pcie , lower_32_bits ( res_start ) & ~ 0x7F ,
2015-02-04 12:02:55 +03:00
PCIEPALR ( win ) ) ;
2014-05-12 14:57:48 +04:00
/* First resource is for IO */
mask = PAR_ENABLE ;
if ( res - > flags & IORESOURCE_IO )
mask | = IO_SPACE ;
2014-06-30 11:54:23 +04:00
rcar_pci_write_reg ( pcie , mask , PCIEPTCTLR ( win ) ) ;
2014-05-12 14:57:48 +04:00
}
2015-11-25 18:30:37 +03:00
static int rcar_pcie_setup ( struct list_head * resource , struct rcar_pcie * pci )
2014-05-12 14:57:48 +04:00
{
2015-11-25 18:30:37 +03:00
struct resource_entry * win ;
int i = 0 ;
2014-05-12 14:57:48 +04:00
/* Setup PCI resources */
2015-11-25 18:30:37 +03:00
resource_list_for_each_entry ( win , & pci - > resources ) {
struct resource * res = win - > res ;
2014-05-12 14:57:48 +04:00
if ( ! res - > flags )
continue ;
2015-11-25 18:30:37 +03:00
switch ( resource_type ( res ) ) {
case IORESOURCE_IO :
case IORESOURCE_MEM :
rcar_pcie_setup_window ( i , pci , res ) ;
i + + ;
break ;
case IORESOURCE_BUS :
pci - > root_bus_nr = res - > start ;
break ;
default :
continue ;
2015-10-02 13:25:04 +03:00
}
2015-10-02 13:25:05 +03:00
pci_add_resource ( resource , res ) ;
2014-05-12 14:57:48 +04:00
}
return 1 ;
}
2016-09-22 23:20:18 +03:00
static void rcar_pcie_force_speedup ( struct rcar_pcie * pcie )
{
2016-10-10 22:31:28 +03:00
struct device * dev = pcie - > dev ;
2016-09-22 23:20:18 +03:00
unsigned int timeout = 1000 ;
u32 macsr ;
if ( ( rcar_pci_read_reg ( pcie , MACS2R ) & LINK_SPEED ) ! = LINK_SPEED_5_0GTS )
return ;
if ( rcar_pci_read_reg ( pcie , MACCTLR ) & SPEED_CHANGE ) {
2016-10-10 22:31:28 +03:00
dev_err ( dev , " Speed change already in progress \n " ) ;
2016-09-22 23:20:18 +03:00
return ;
}
macsr = rcar_pci_read_reg ( pcie , MACSR ) ;
if ( ( macsr & LINK_SPEED ) = = LINK_SPEED_5_0GTS )
goto done ;
/* Set target link speed to 5.0 GT/s */
rcar_rmw32 ( pcie , EXPCAP ( 12 ) , PCI_EXP_LNKSTA_CLS ,
PCI_EXP_LNKSTA_CLS_5_0GB ) ;
/* Set speed change reason as intentional factor */
rcar_rmw32 ( pcie , MACCGSPSETR , SPCNGRSN , 0 ) ;
/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
if ( macsr & ( SPCHGFIN | SPCHGSUC | SPCHGFAIL ) )
rcar_pci_write_reg ( pcie , macsr , MACSR ) ;
/* Start link speed change */
rcar_rmw32 ( pcie , MACCTLR , SPEED_CHANGE , SPEED_CHANGE ) ;
while ( timeout - - ) {
macsr = rcar_pci_read_reg ( pcie , MACSR ) ;
if ( macsr & SPCHGFIN ) {
/* Clear the interrupt bits */
rcar_pci_write_reg ( pcie , macsr , MACSR ) ;
if ( macsr & SPCHGFAIL )
2016-10-10 22:31:28 +03:00
dev_err ( dev , " Speed change failed \n " ) ;
2016-09-22 23:20:18 +03:00
goto done ;
}
msleep ( 1 ) ;
2018-03-07 18:42:39 +03:00
}
2016-09-22 23:20:18 +03:00
2016-10-10 22:31:28 +03:00
dev_err ( dev , " Speed change timed out \n " ) ;
2016-09-22 23:20:18 +03:00
done :
2016-10-10 22:31:28 +03:00
dev_info ( dev , " Current link speed is %s GT/s \n " ,
2016-09-22 23:20:18 +03:00
( macsr & LINK_SPEED ) = = LINK_SPEED_5_0GTS ? " 5 " : " 2.5 " ) ;
}
2015-10-02 13:25:05 +03:00
static int rcar_pcie_enable ( struct rcar_pcie * pcie )
2014-05-12 14:57:48 +04:00
{
2016-10-10 22:31:28 +03:00
struct device * dev = pcie - > dev ;
2017-06-28 23:13:57 +03:00
struct pci_host_bridge * bridge = pci_host_bridge_from_priv ( pcie ) ;
2015-10-02 13:25:05 +03:00
struct pci_bus * bus , * child ;
2017-06-28 23:13:57 +03:00
int ret ;
2014-05-12 14:57:48 +04:00
2016-09-22 23:20:18 +03:00
/* Try setting 5 GT/s link speed */
rcar_pcie_force_speedup ( pcie ) ;
2017-06-28 23:13:57 +03:00
rcar_pcie_setup ( & bridge - > windows , pcie ) ;
2015-10-02 13:25:05 +03:00
PCI: Remove PCI_REASSIGN_ALL_RSRC use on arm and arm64
On arm, PCI_REASSIGN_ALL_RSRC is used only in pcibios_assign_all_busses(),
which helps decide whether to reconfigure bridge bus numbers. It has
nothing to do with BAR assignments. On arm64 and powerpc,
pcibios_assign_all_busses() tests PCI_REASSIGN_ALL_BUS, which makes more
sense.
Align arm with arm64 and powerpc, so they all use PCI_REASSIGN_ALL_BUS for
pcibios_assign_all_busses().
Remove PCI_REASSIGN_ALL_RSRC from the generic, Tegra, Versatile, and
R-Car drivers. These drivers are used only on arm or arm64, where
PCI_REASSIGN_ALL_RSRC is not used after this change, so removing it
should have no effect.
No functional change intended.
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Manikanta Maddireddy <mmaddireddy@nvidia.com>
Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
2017-11-30 20:21:57 +03:00
pci_add_flags ( PCI_REASSIGN_ALL_BUS ) ;
2015-10-02 13:25:05 +03:00
2017-06-28 23:13:57 +03:00
bridge - > dev . parent = dev ;
bridge - > sysdata = pcie ;
bridge - > busnr = pcie - > root_bus_nr ;
bridge - > ops = & rcar_pcie_ops ;
2017-06-28 23:14:06 +03:00
bridge - > map_irq = of_irq_parse_and_map_pci ;
bridge - > swizzle_irq = pci_common_swizzle ;
2015-10-02 13:25:05 +03:00
if ( IS_ENABLED ( CONFIG_PCI_MSI ) )
2017-06-28 23:13:57 +03:00
bridge - > msi = & pcie - > msi . chip ;
2015-10-02 13:25:05 +03:00
2017-06-28 23:13:57 +03:00
ret = pci_scan_root_bus_bridge ( bridge ) ;
2017-08-04 06:32:54 +03:00
if ( ret < 0 )
2017-06-28 23:13:57 +03:00
return ret ;
2015-10-02 13:25:05 +03:00
2017-06-28 23:13:57 +03:00
bus = bridge - > bus ;
2016-01-29 14:29:31 +03:00
pci_bus_size_bridges ( bus ) ;
pci_bus_assign_resources ( bus ) ;
2014-05-12 14:57:48 +04:00
2016-01-29 14:29:31 +03:00
list_for_each_entry ( child , & bus - > children , node )
pcie_bus_configure_settings ( child ) ;
2015-10-02 13:25:05 +03:00
pci_bus_add_devices ( bus ) ;
return 0 ;
2014-05-12 14:57:48 +04:00
}
static int phy_wait_for_ack ( struct rcar_pcie * pcie )
{
2016-10-10 22:31:28 +03:00
struct device * dev = pcie - > dev ;
2014-05-12 14:57:48 +04:00
unsigned int timeout = 100 ;
while ( timeout - - ) {
2014-06-30 11:54:23 +04:00
if ( rcar_pci_read_reg ( pcie , H1_PCIEPHYADRR ) & PHY_ACK )
2014-05-12 14:57:48 +04:00
return 0 ;
udelay ( 100 ) ;
}
2016-10-10 22:31:28 +03:00
dev_err ( dev , " Access to PCIe phy timed out \n " ) ;
2014-05-12 14:57:48 +04:00
return - ETIMEDOUT ;
}
/*
 * Write one H1 PHY register: latch the data, issue the write command,
 * wait for the ack, then clear the command and wait for the ack again.
 */
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, u32 addr,
			  unsigned int lane, u32 data)
{
	u32 phyaddr = WRITE_CMD |
		      ((rate & 1) << RATE_POS) |
		      ((lane & 0xf) << LANE_POS) |
		      ((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}
2018-05-03 22:36:37 +03:00
static int rcar_pcie_wait_for_phyrdy ( struct rcar_pcie * pcie )
2014-05-12 14:57:48 +04:00
{
unsigned int timeout = 10 ;
while ( timeout - - ) {
2018-05-03 22:36:37 +03:00
if ( rcar_pci_read_reg ( pcie , PCIEPHYSR ) & PHYRDY )
2014-05-12 14:57:48 +04:00
return 0 ;
msleep ( 5 ) ;
}
return - ETIMEDOUT ;
}
static int rcar_pcie_wait_for_dl ( struct rcar_pcie * pcie )
{
2018-05-22 15:24:20 +03:00
unsigned int timeout = 10000 ;
2014-05-12 14:57:48 +04:00
while ( timeout - - ) {
2014-06-30 11:54:23 +04:00
if ( ( rcar_pci_read_reg ( pcie , PCIETSTR ) & DATA_LINK_ACTIVE ) )
2014-05-12 14:57:48 +04:00
return 0 ;
2018-05-22 15:24:20 +03:00
udelay ( 5 ) ;
cpu_relax ( ) ;
2014-05-12 14:57:48 +04:00
}
return - ETIMEDOUT ;
}
/*
 * Initialize the controller hardware: reset, select root-complex mode,
 * wait for the PHY, program the port's config-space header and
 * capabilities, then start link training and wait for the data link.
 *
 * Returns 0 on success or -ETIMEDOUT if the PHY or link never comes up.
 * NOTE(review): the register-write order below follows the hardware
 * init sequence — do not reorder.
 */
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}
2018-05-03 22:43:13 +03:00
/*
 * PHY bring-up for R-Car H1: program the vendor-specified register
 * values through the serial PHY access interface. Always returns 0.
 */
static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
{
	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}
2018-05-03 22:43:13 +03:00
static int rcar_pcie_phy_init_gen2 ( struct rcar_pcie * pcie )
2016-01-05 16:00:31 +03:00
{
/*
* These settings come from the R - Car Series , 2 nd Generation User ' s
* Manual , section 50.3 .1 ( 2 ) Initialization of the physical layer .
*/
rcar_pci_write_reg ( pcie , 0x000f0030 , GEN2_PCIEPHYADDR ) ;
rcar_pci_write_reg ( pcie , 0x00381203 , GEN2_PCIEPHYDATA ) ;
rcar_pci_write_reg ( pcie , 0x00000001 , GEN2_PCIEPHYCTRL ) ;
rcar_pci_write_reg ( pcie , 0x00000006 , GEN2_PCIEPHYCTRL ) ;
rcar_pci_write_reg ( pcie , 0x000f0054 , GEN2_PCIEPHYADDR ) ;
/* The following value is for DC connection, no termination resistor */
rcar_pci_write_reg ( pcie , 0x13802007 , GEN2_PCIEPHYDATA ) ;
rcar_pci_write_reg ( pcie , 0x00000001 , GEN2_PCIEPHYCTRL ) ;
rcar_pci_write_reg ( pcie , 0x00000006 , GEN2_PCIEPHYCTRL ) ;
2018-05-03 22:43:13 +03:00
return 0 ;
2016-01-05 16:00:31 +03:00
}
2018-05-03 22:43:13 +03:00
static int rcar_pcie_phy_init_gen3 ( struct rcar_pcie * pcie )
2018-05-03 22:40:54 +03:00
{
int err ;
err = phy_init ( pcie - > phy ) ;
if ( err )
return err ;
2018-06-29 21:48:15 +03:00
err = phy_power_on ( pcie - > phy ) ;
if ( err )
phy_exit ( pcie - > phy ) ;
return err ;
2016-01-05 16:00:31 +03:00
}
2014-05-12 14:57:49 +04:00
static int rcar_msi_alloc ( struct rcar_msi * chip )
{
int msi ;
mutex_lock ( & chip - > lock ) ;
msi = find_first_zero_bit ( chip - > used , INT_PCI_MSI_NR ) ;
if ( msi < INT_PCI_MSI_NR )
set_bit ( msi , chip - > used ) ;
else
msi = - ENOSPC ;
mutex_unlock ( & chip - > lock ) ;
return msi ;
}
2016-09-08 22:32:59 +03:00
static int rcar_msi_alloc_region ( struct rcar_msi * chip , int no_irqs )
{
int msi ;
mutex_lock ( & chip - > lock ) ;
msi = bitmap_find_free_region ( chip - > used , INT_PCI_MSI_NR ,
order_base_2 ( no_irqs ) ) ;
mutex_unlock ( & chip - > lock ) ;
return msi ;
}
2014-05-12 14:57:49 +04:00
static void rcar_msi_free ( struct rcar_msi * chip , unsigned long irq )
{
mutex_lock ( & chip - > lock ) ;
clear_bit ( irq , chip - > used ) ;
mutex_unlock ( & chip - > lock ) ;
}
static irqreturn_t rcar_pcie_msi_irq ( int irq , void * data )
{
struct rcar_pcie * pcie = data ;
struct rcar_msi * msi = & pcie - > msi ;
2016-10-10 22:31:28 +03:00
struct device * dev = pcie - > dev ;
2014-05-12 14:57:49 +04:00
unsigned long reg ;
2014-06-30 11:54:23 +04:00
reg = rcar_pci_read_reg ( pcie , PCIEMSIFR ) ;
2014-05-12 14:57:49 +04:00
/* MSI & INTx share an interrupt - we only handle MSI here */
if ( ! reg )
return IRQ_NONE ;
while ( reg ) {
unsigned int index = find_first_bit ( & reg , 32 ) ;
unsigned int irq ;
/* clear the interrupt */
2014-06-30 11:54:23 +04:00
rcar_pci_write_reg ( pcie , 1 < < index , PCIEMSIFR ) ;
2014-05-12 14:57:49 +04:00
irq = irq_find_mapping ( msi - > domain , index ) ;
if ( irq ) {
if ( test_bit ( index , msi - > used ) )
generic_handle_irq ( irq ) ;
else
2016-10-10 22:31:28 +03:00
dev_info ( dev , " unhandled MSI \n " ) ;
2014-05-12 14:57:49 +04:00
} else {
/* Unknown MSI, just clear it */
2016-10-10 22:31:28 +03:00
dev_dbg ( dev , " unexpected MSI \n " ) ;
2014-05-12 14:57:49 +04:00
}
/* see if there's any more pending in this vector */
2014-06-30 11:54:23 +04:00
reg = rcar_pci_read_reg ( pcie , PCIEMSIFR ) ;
2014-05-12 14:57:49 +04:00
}
return IRQ_HANDLED ;
}
2014-11-12 03:45:45 +03:00
static int rcar_msi_setup_irq ( struct msi_controller * chip , struct pci_dev * pdev ,
2014-05-12 14:57:49 +04:00
struct msi_desc * desc )
{
struct rcar_msi * msi = to_rcar_msi ( chip ) ;
struct rcar_pcie * pcie = container_of ( chip , struct rcar_pcie , msi . chip ) ;
struct msi_msg msg ;
unsigned int irq ;
int hwirq ;
hwirq = rcar_msi_alloc ( msi ) ;
if ( hwirq < 0 )
return hwirq ;
2016-09-08 22:32:59 +03:00
irq = irq_find_mapping ( msi - > domain , hwirq ) ;
2014-05-12 14:57:49 +04:00
if ( ! irq ) {
rcar_msi_free ( msi , hwirq ) ;
return - EINVAL ;
}
irq_set_msi_desc ( irq , desc ) ;
2014-06-30 11:54:23 +04:00
msg . address_lo = rcar_pci_read_reg ( pcie , PCIEMSIALR ) & ~ MSIFE ;
msg . address_hi = rcar_pci_read_reg ( pcie , PCIEMSIAUR ) ;
2014-05-12 14:57:49 +04:00
msg . data = hwirq ;
2014-11-09 18:10:34 +03:00
pci_write_msi_msg ( irq , & msg ) ;
2014-05-12 14:57:49 +04:00
return 0 ;
}
2016-09-08 22:32:59 +03:00
static int rcar_msi_setup_irqs ( struct msi_controller * chip ,
struct pci_dev * pdev , int nvec , int type )
{
struct rcar_pcie * pcie = container_of ( chip , struct rcar_pcie , msi . chip ) ;
struct rcar_msi * msi = to_rcar_msi ( chip ) ;
struct msi_desc * desc ;
struct msi_msg msg ;
unsigned int irq ;
int hwirq ;
int i ;
/* MSI-X interrupts are not supported */
if ( type = = PCI_CAP_ID_MSIX )
return - EINVAL ;
WARN_ON ( ! list_is_singular ( & pdev - > dev . msi_list ) ) ;
desc = list_entry ( pdev - > dev . msi_list . next , struct msi_desc , list ) ;
hwirq = rcar_msi_alloc_region ( msi , nvec ) ;
if ( hwirq < 0 )
return - ENOSPC ;
irq = irq_find_mapping ( msi - > domain , hwirq ) ;
if ( ! irq )
return - ENOSPC ;
for ( i = 0 ; i < nvec ; i + + ) {
/*
* irq_create_mapping ( ) called from rcar_pcie_probe ( ) pre -
* allocates descs , so there is no need to allocate descs here .
* We can therefore assume that if irq_find_mapping ( ) above
* returns non - zero , then the descs are also successfully
* allocated .
*/
if ( irq_set_msi_desc_off ( irq , i , desc ) ) {
/* TODO: clear */
return - EINVAL ;
}
}
desc - > nvec_used = nvec ;
desc - > msi_attrib . multiple = order_base_2 ( nvec ) ;
msg . address_lo = rcar_pci_read_reg ( pcie , PCIEMSIALR ) & ~ MSIFE ;
msg . address_hi = rcar_pci_read_reg ( pcie , PCIEMSIAUR ) ;
msg . data = hwirq ;
pci_write_msi_msg ( irq , & msg ) ;
return 0 ;
}
2014-11-12 03:45:45 +03:00
static void rcar_msi_teardown_irq ( struct msi_controller * chip , unsigned int irq )
2014-05-12 14:57:49 +04:00
{
struct rcar_msi * msi = to_rcar_msi ( chip ) ;
struct irq_data * d = irq_get_irq_data ( irq ) ;
rcar_msi_free ( msi , d - > hwirq ) ;
}
/* irq_chip for MSI vectors; all ops delegate to the PCI MSI core. */
static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
/* irq_domain .map hook: wire a new virq to the MSI irq_chip. */
static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}
/* Operations for the linear MSI irq domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};
2018-05-24 17:36:23 +03:00
static void rcar_pcie_unmap_msi ( struct rcar_pcie * pcie )
{
struct rcar_msi * msi = & pcie - > msi ;
int i , irq ;
for ( i = 0 ; i < INT_PCI_MSI_NR ; i + + ) {
irq = irq_find_mapping ( msi - > domain , i ) ;
if ( irq > 0 )
irq_dispose_mapping ( irq ) ;
}
irq_domain_remove ( msi - > domain ) ;
}
2014-05-12 14:57:49 +04:00
static int rcar_pcie_enable_msi ( struct rcar_pcie * pcie )
{
2016-10-10 22:31:28 +03:00
struct device * dev = pcie - > dev ;
2014-05-12 14:57:49 +04:00
struct rcar_msi * msi = & pcie - > msi ;
unsigned long base ;
2016-09-08 22:32:59 +03:00
int err , i ;
2014-05-12 14:57:49 +04:00
mutex_init ( & msi - > lock ) ;
2016-10-10 22:31:28 +03:00
msi - > chip . dev = dev ;
2014-05-12 14:57:49 +04:00
msi - > chip . setup_irq = rcar_msi_setup_irq ;
2016-09-08 22:32:59 +03:00
msi - > chip . setup_irqs = rcar_msi_setup_irqs ;
2014-05-12 14:57:49 +04:00
msi - > chip . teardown_irq = rcar_msi_teardown_irq ;
2016-10-10 22:31:28 +03:00
msi - > domain = irq_domain_add_linear ( dev - > of_node , INT_PCI_MSI_NR ,
2014-05-12 14:57:49 +04:00
& msi_domain_ops , & msi - > chip ) ;
if ( ! msi - > domain ) {
2016-10-10 22:31:28 +03:00
dev_err ( dev , " failed to create IRQ domain \n " ) ;
2014-05-12 14:57:49 +04:00
return - ENOMEM ;
}
2016-09-08 22:32:59 +03:00
for ( i = 0 ; i < INT_PCI_MSI_NR ; i + + )
irq_create_mapping ( msi - > domain , i ) ;
2014-05-12 14:57:49 +04:00
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
2016-10-10 22:31:28 +03:00
err = devm_request_irq ( dev , msi - > irq1 , rcar_pcie_msi_irq ,
2015-12-10 22:18:20 +03:00
IRQF_SHARED | IRQF_NO_THREAD ,
rcar_msi_irq_chip . name , pcie ) ;
2014-05-12 14:57:49 +04:00
if ( err < 0 ) {
2016-10-10 22:31:28 +03:00
dev_err ( dev , " failed to request IRQ: %d \n " , err ) ;
2014-05-12 14:57:49 +04:00
goto err ;
}
2016-10-10 22:31:28 +03:00
err = devm_request_irq ( dev , msi - > irq2 , rcar_pcie_msi_irq ,
2015-12-10 22:18:20 +03:00
IRQF_SHARED | IRQF_NO_THREAD ,
rcar_msi_irq_chip . name , pcie ) ;
2014-05-12 14:57:49 +04:00
if ( err < 0 ) {
2016-10-10 22:31:28 +03:00
dev_err ( dev , " failed to request IRQ: %d \n " , err ) ;
2014-05-12 14:57:49 +04:00
goto err ;
}
/* setup MSI data target */
msi - > pages = __get_free_pages ( GFP_KERNEL , 0 ) ;
base = virt_to_phys ( ( void * ) msi - > pages ) ;
2014-06-30 11:54:23 +04:00
rcar_pci_write_reg ( pcie , base | MSIFE , PCIEMSIALR ) ;
rcar_pci_write_reg ( pcie , 0 , PCIEMSIAUR ) ;
2014-05-12 14:57:49 +04:00
/* enable all MSI interrupts */
2014-06-30 11:54:23 +04:00
rcar_pci_write_reg ( pcie , 0xffffffff , PCIEMSIIER ) ;
2014-05-12 14:57:49 +04:00
return 0 ;
err :
2018-05-24 17:36:23 +03:00
rcar_pcie_unmap_msi ( pcie ) ;
2014-05-12 14:57:49 +04:00
return err ;
}
2018-05-24 17:36:21 +03:00
/*
 * Undo rcar_pcie_enable_msi(): quiesce the MSI capture hardware first,
 * then release the page used as the MSI write target and tear down the
 * IRQ domain.  The ordering matters: the hardware must stop decoding the
 * MSI address before the backing page is freed.
 */
static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
{
	struct rcar_msi *msi = &pcie->msi;

	/* Disable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/* Disable address decoding of the MSI interrupt, MSIFE */
	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);

	free_pages(msi->pages, 0);

	rcar_pcie_unmap_msi(pcie);
}
2016-10-06 21:40:27 +03:00
/*
 * Acquire the controller's static resources: the optional "pcie" PHY,
 * the register window from DT reg entry 0, the "pcie_bus" clock, and the
 * two interrupts (DT indices 0 and 1) later used for MSI/INTx handling.
 *
 * Returns 0 or a negative errno.  PHY, register mapping and clock are
 * devm-managed; only the IRQ mapping created for irq1 needs explicit
 * disposal if mapping irq2 fails.
 */
static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(pcie->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		return PTR_ERR(pcie->bus_clk);
	}

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq1;
	}
	pcie->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq2;
	}
	pcie->msi.irq2 = i;

	return 0;

err_irq2:
	irq_dispose_mapping(pcie->msi.irq1);
err_irq1:
	return err;
}
/*
 * Program inbound (PCI -> CPU) address translation windows for one
 * dma-range entry.  Each window is a PCIEPRAR/PCIELAR/PCIELAMR register
 * pair (even index = low 32 bits, odd index = high 32 bits).  A range
 * that is larger than the alignment of its start address, or larger than
 * 4 GiB, is split across multiple windows.  *index is advanced past the
 * window pairs consumed.
 *
 * Returns 0, or -EINVAL if the hardware runs out of windows.
 */
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct of_pci_range *range,
				    int *index)
{
	u64 restype = range->flags;
	u64 cpu_addr = range->cpu_addr;
	u64 cpu_end = range->cpu_addr + range->size;
	u64 pci_addr = range->pci_addr;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size;
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	/*
	 * If the size of the range is larger than the alignment of the start
	 * address, we have to use multiple entries to perform the mapping.
	 */
	if (cpu_addr > 0) {
		/* Largest power-of-two window the start address permits. */
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(range->size, alignment);
	} else {
		size = range->size;
	}
	/* Hardware supports max 4GiB inbound region */
	size = min(size, 1ULL << 32);

	/* LAMR mask: window size rounded to a power of two, low bits clear. */
	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	while (cpu_addr < cpu_end) {
		/*
		 * Set up 64-bit inbound regions as the range parser doesn't
		 * distinguish between 32 and 64-bit types.
		 */
		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
				   PCIEPRAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
				   PCIELAMR(idx));

		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
				   PCIEPRAR(idx + 1));
		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
				   PCIELAR(idx + 1));
		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));

		pci_addr += size;
		cpu_addr += size;
		idx += 2;

		if (idx > MAX_NR_INBOUND_MAPS) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
	}
	*index = idx;

	return 0;
}
/*
 * Walk the device tree "dma-ranges" property of @np and program an
 * inbound translation window (or set of windows) for each entry via
 * rcar_pcie_inbound_ranges().  Returns 0 or a negative errno.
 */
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
					  struct device_node *np)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int index = 0;
	int err;

	if (of_pci_dma_range_parser_init(&parser, np))
		return -EINVAL;

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;

		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);

		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
		if (err)
			return err;
	}

	return 0;
}
/*
 * The OF match data is the SoC-specific PHY initialization function,
 * fetched with of_device_get_match_data() in rcar_pcie_probe().
 */
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};
2015-11-25 18:30:37 +03:00
2014-05-12 14:57:48 +04:00
static int rcar_pcie_probe ( struct platform_device * pdev )
{
2016-10-10 22:31:28 +03:00
struct device * dev = & pdev - > dev ;
2014-05-12 14:57:48 +04:00
struct rcar_pcie * pcie ;
2019-03-25 14:40:58 +03:00
u32 data ;
2015-11-25 18:30:37 +03:00
int err ;
2018-05-03 22:43:13 +03:00
int ( * phy_init_fn ) ( struct rcar_pcie * ) ;
2017-06-28 23:13:57 +03:00
struct pci_host_bridge * bridge ;
2014-05-12 14:57:48 +04:00
2017-06-28 23:13:57 +03:00
bridge = pci_alloc_host_bridge ( sizeof ( * pcie ) ) ;
if ( ! bridge )
2014-05-12 14:57:48 +04:00
return - ENOMEM ;
2017-06-28 23:13:57 +03:00
pcie = pci_host_bridge_priv ( bridge ) ;
2016-10-10 22:31:28 +03:00
pcie - > dev = dev ;
2014-05-12 14:57:48 +04:00
2018-04-25 18:21:25 +03:00
err = pci_parse_request_of_pci_ranges ( dev , & pcie - > resources , NULL ) ;
2017-12-07 13:15:20 +03:00
if ( err )
goto err_free_bridge ;
2014-05-12 14:57:48 +04:00
2018-04-08 16:09:25 +03:00
pm_runtime_enable ( pcie - > dev ) ;
err = pm_runtime_get_sync ( pcie - > dev ) ;
if ( err < 0 ) {
dev_err ( pcie - > dev , " pm_runtime_get_sync failed \n " ) ;
goto err_pm_disable ;
}
2016-10-06 21:40:27 +03:00
err = rcar_pcie_get_resources ( pcie ) ;
2014-05-12 14:57:48 +04:00
if ( err < 0 ) {
2016-10-10 22:31:28 +03:00
dev_err ( dev , " failed to request resources: %d \n " , err ) ;
2018-04-08 16:09:25 +03:00
goto err_pm_put ;
2014-05-12 14:57:48 +04:00
}
2018-05-24 17:36:19 +03:00
err = clk_prepare_enable ( pcie - > bus_clk ) ;
if ( err ) {
dev_err ( dev , " failed to enable bus clock: %d \n " , err ) ;
2018-05-24 17:36:20 +03:00
goto err_unmap_msi_irqs ;
2014-05-12 14:57:48 +04:00
}
2016-10-10 22:31:28 +03:00
err = rcar_pcie_parse_map_dma_ranges ( pcie , dev - > of_node ) ;
2016-09-09 01:26:18 +03:00
if ( err )
2018-05-24 17:36:19 +03:00
goto err_clk_disable ;
2014-05-12 14:57:48 +04:00
2018-05-03 22:43:13 +03:00
phy_init_fn = of_device_get_match_data ( dev ) ;
err = phy_init_fn ( pcie ) ;
2014-05-12 14:57:48 +04:00
if ( err ) {
2018-05-03 22:43:13 +03:00
dev_err ( dev , " failed to init PCIe PHY \n " ) ;
2018-05-24 17:36:19 +03:00
goto err_clk_disable ;
2016-01-05 16:00:30 +03:00
}
2014-05-12 14:57:48 +04:00
/* Failure to get a link might just be that no cards are inserted */
2018-05-03 22:43:13 +03:00
if ( rcar_pcie_hw_init ( pcie ) ) {
2016-10-10 22:31:28 +03:00
dev_info ( dev , " PCIe link down \n " ) ;
2016-12-16 14:50:04 +03:00
err = - ENODEV ;
2018-06-29 21:47:38 +03:00
goto err_phy_shutdown ;
2014-05-12 14:57:48 +04:00
}
2014-06-30 11:54:23 +04:00
data = rcar_pci_read_reg ( pcie , MACSR ) ;
2016-10-10 22:31:28 +03:00
dev_info ( dev , " PCIe x%d: link up \n " , ( data > > 20 ) & 0x3f ) ;
2014-05-12 14:57:48 +04:00
2016-01-05 16:00:30 +03:00
if ( IS_ENABLED ( CONFIG_PCI_MSI ) ) {
err = rcar_pcie_enable_msi ( pcie ) ;
if ( err < 0 ) {
2016-10-10 22:31:28 +03:00
dev_err ( dev ,
2016-01-05 16:00:30 +03:00
" failed to enable MSI support: %d \n " ,
err ) ;
2018-06-29 21:47:38 +03:00
goto err_phy_shutdown ;
2016-01-05 16:00:30 +03:00
}
}
err = rcar_pcie_enable ( pcie ) ;
if ( err )
2018-05-24 17:36:21 +03:00
goto err_msi_teardown ;
2016-01-05 16:00:30 +03:00
return 0 ;
2018-05-24 17:36:21 +03:00
err_msi_teardown :
if ( IS_ENABLED ( CONFIG_PCI_MSI ) )
rcar_pcie_teardown_msi ( pcie ) ;
2018-06-29 21:47:38 +03:00
err_phy_shutdown :
if ( pcie - > phy ) {
phy_power_off ( pcie - > phy ) ;
phy_exit ( pcie - > phy ) ;
}
2018-05-24 17:36:19 +03:00
err_clk_disable :
clk_disable_unprepare ( pcie - > bus_clk ) ;
2018-05-24 17:36:20 +03:00
err_unmap_msi_irqs :
irq_dispose_mapping ( pcie - > msi . irq2 ) ;
irq_dispose_mapping ( pcie - > msi . irq1 ) ;
2016-01-05 16:00:30 +03:00
err_pm_put :
2016-10-10 22:31:28 +03:00
pm_runtime_put ( dev ) ;
2016-01-05 16:00:30 +03:00
err_pm_disable :
2016-10-10 22:31:28 +03:00
pm_runtime_disable ( dev ) ;
2017-08-04 06:32:55 +03:00
pci_free_resource_list ( & pcie - > resources ) ;
2018-04-08 16:09:25 +03:00
2017-12-07 13:15:20 +03:00
err_free_bridge :
2017-12-07 13:15:19 +03:00
pci_free_host_bridge ( bridge ) ;
2017-08-04 06:32:54 +03:00
2016-01-05 16:00:30 +03:00
return err ;
2014-05-12 14:57:48 +04:00
}
static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		/* No .remove callback: forbid unbinding via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
builtin_platform_driver(rcar_pcie_driver);