/*
 * PCIe host controller driver for Samsung EXYNOS SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>

#include "pcie-designware.h"
#define to_exynos_pcie(x)	container_of(x, struct exynos_pcie, pp)

struct exynos_pcie {
	struct pcie_port	pp;
	void __iomem		*elbi_base;	/* DT 0th resource */
	void __iomem		*phy_base;	/* DT 1st resource */
	void __iomem		*block_base;	/* DT 2nd resource */
	int			reset_gpio;
	struct clk		*clk;
	struct clk		*bus_clk;
};
/* PCIe ELBI registers */
#define PCIE_IRQ_PULSE			0x000
#define IRQ_INTA_ASSERT			(0x1 << 0)
#define IRQ_INTB_ASSERT			(0x1 << 2)
#define IRQ_INTC_ASSERT			(0x1 << 4)
#define IRQ_INTD_ASSERT			(0x1 << 6)
#define PCIE_IRQ_LEVEL			0x004
#define PCIE_IRQ_SPECIAL		0x008
#define PCIE_IRQ_EN_PULSE		0x00c
#define PCIE_IRQ_EN_LEVEL		0x010
#define IRQ_MSI_ENABLE			(0x1 << 2)
#define PCIE_IRQ_EN_SPECIAL		0x014
#define PCIE_PWR_RESET			0x018
#define PCIE_CORE_RESET			0x01c
#define PCIE_CORE_RESET_ENABLE		(0x1 << 0)
#define PCIE_STICKY_RESET		0x020
#define PCIE_NONSTICKY_RESET		0x024
#define PCIE_APP_INIT_RESET		0x028
#define PCIE_APP_LTSSM_ENABLE		0x02c
#define PCIE_ELBI_RDLH_LINKUP		0x064
#define PCIE_ELBI_LTSSM_ENABLE		0x1
#define PCIE_ELBI_SLV_AWMISC		0x11c
#define PCIE_ELBI_SLV_ARMISC		0x120
#define PCIE_ELBI_SLV_DBI_ENABLE	(0x1 << 21)
/* PCIe Purple registers */
#define PCIE_PHY_GLOBAL_RESET		0x000
#define PCIE_PHY_COMMON_RESET		0x004
#define PCIE_PHY_CMN_REG		0x008
#define PCIE_PHY_MAC_RESET		0x00c
#define PCIE_PHY_PLL_LOCKED		0x010
#define PCIE_PHY_TRSVREG_RESET		0x020
#define PCIE_PHY_TRSV_RESET		0x024
/* PCIe PHY registers */
#define PCIE_PHY_IMPEDANCE		0x004
#define PCIE_PHY_PLL_DIV_0		0x008
#define PCIE_PHY_PLL_BIAS		0x00c
#define PCIE_PHY_DCC_FEEDBACK		0x014
#define PCIE_PHY_PLL_DIV_1		0x05c
#define PCIE_PHY_COMMON_POWER		0x064
#define PCIE_PHY_COMMON_PD_CMN		(0x1 << 3)
#define PCIE_PHY_TRSV0_EMP_LVL		0x084
#define PCIE_PHY_TRSV0_DRV_LVL		0x088
#define PCIE_PHY_TRSV0_RXCDR		0x0ac
#define PCIE_PHY_TRSV0_POWER		0x0c4
#define PCIE_PHY_TRSV0_PD_TSV		(0x1 << 7)
#define PCIE_PHY_TRSV0_LVCC		0x0dc
#define PCIE_PHY_TRSV1_EMP_LVL		0x144
#define PCIE_PHY_TRSV1_RXCDR		0x16c
#define PCIE_PHY_TRSV1_POWER		0x184
#define PCIE_PHY_TRSV1_PD_TSV		(0x1 << 7)
#define PCIE_PHY_TRSV1_LVCC		0x19c
#define PCIE_PHY_TRSV2_EMP_LVL		0x204
#define PCIE_PHY_TRSV2_RXCDR		0x22c
#define PCIE_PHY_TRSV2_POWER		0x244
#define PCIE_PHY_TRSV2_PD_TSV		(0x1 << 7)
#define PCIE_PHY_TRSV2_LVCC		0x25c
#define PCIE_PHY_TRSV3_EMP_LVL		0x2c4
#define PCIE_PHY_TRSV3_RXCDR		0x2ec
#define PCIE_PHY_TRSV3_POWER		0x304
#define PCIE_PHY_TRSV3_PD_TSV		(0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC		0x31c
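
/*
 * Register accessors for the three MMIO regions described in the DT node:
 * the ELBI (application/glue) registers, the PHY registers, and the "block"
 * (Purple) registers.  All accesses are plain 32-bit readl()/writel().
 */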
static void exynos_elb_writel(struct exynos_pcie *ep, u32 val, u32 reg)
{
	writel(val, ep->elbi_base + reg);
}

static u32 exynos_elb_readl(struct exynos_pcie *ep, u32 reg)
{
	return readl(ep->elbi_base + reg);
}

static void exynos_phy_writel(struct exynos_pcie *ep, u32 val, u32 reg)
{
	writel(val, ep->phy_base + reg);
}

static u32 exynos_phy_readl(struct exynos_pcie *ep, u32 reg)
{
	return readl(ep->phy_base + reg);
}

static void exynos_blk_writel(struct exynos_pcie *ep, u32 val, u32 reg)
{
	writel(val, ep->block_base + reg);
}

static u32 exynos_blk_readl(struct exynos_pcie *ep, u32 reg)
{
	return readl(ep->block_base + reg);
}
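
/*
 * On this SoC, access to the DesignWare DBI (own configuration) space is
 * gated by sideband control bits in the ELBI block: AWMISC for the write
 * path and ARMISC for the read path.  The helpers below toggle the
 * DBI-enable bit around every dbi_base access (see the readl_rc/writel_rc
 * and rd/wr_own_conf callbacks further down).
 */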
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
	u32 val;

	if (on) {
		val = exynos_elb_readl(ep, PCIE_ELBI_SLV_AWMISC);
		val |= PCIE_ELBI_SLV_DBI_ENABLE;
		exynos_elb_writel(ep, val, PCIE_ELBI_SLV_AWMISC);
	} else {
		val = exynos_elb_readl(ep, PCIE_ELBI_SLV_AWMISC);
		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
		exynos_elb_writel(ep, val, PCIE_ELBI_SLV_AWMISC);
	}
}

static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
	u32 val;

	if (on) {
		val = exynos_elb_readl(ep, PCIE_ELBI_SLV_ARMISC);
		val |= PCIE_ELBI_SLV_DBI_ENABLE;
		exynos_elb_writel(ep, val, PCIE_ELBI_SLV_ARMISC);
	} else {
		val = exynos_elb_readl(ep, PCIE_ELBI_SLV_ARMISC);
		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
		exynos_elb_writel(ep, val, PCIE_ELBI_SLV_ARMISC);
	}
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_elb_readl(ep, PCIE_CORE_RESET);
	val &= ~PCIE_CORE_RESET_ENABLE;
	exynos_elb_writel(ep, val, PCIE_CORE_RESET);
	exynos_elb_writel(ep, 0, PCIE_PWR_RESET);
	exynos_elb_writel(ep, 0, PCIE_STICKY_RESET);
	exynos_elb_writel(ep, 0, PCIE_NONSTICKY_RESET);
}

static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_elb_readl(ep, PCIE_CORE_RESET);
	val |= PCIE_CORE_RESET_ENABLE;

	exynos_elb_writel(ep, val, PCIE_CORE_RESET);
	exynos_elb_writel(ep, 1, PCIE_STICKY_RESET);
	exynos_elb_writel(ep, 1, PCIE_NONSTICKY_RESET);
	exynos_elb_writel(ep, 1, PCIE_APP_INIT_RESET);
	exynos_elb_writel(ep, 0, PCIE_APP_INIT_RESET);
	exynos_blk_writel(ep, 1, PCIE_PHY_MAC_RESET);
}

static void exynos_pcie_assert_phy_reset(struct exynos_pcie *ep)
{
	exynos_blk_writel(ep, 0, PCIE_PHY_MAC_RESET);
	exynos_blk_writel(ep, 1, PCIE_PHY_GLOBAL_RESET);
}

static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *ep)
{
	exynos_blk_writel(ep, 0, PCIE_PHY_GLOBAL_RESET);
	exynos_elb_writel(ep, 1, PCIE_PWR_RESET);
	exynos_blk_writel(ep, 0, PCIE_PHY_COMMON_RESET);
	exynos_blk_writel(ep, 0, PCIE_PHY_CMN_REG);
	exynos_blk_writel(ep, 0, PCIE_PHY_TRSVREG_RESET);
	exynos_blk_writel(ep, 0, PCIE_PHY_TRSV_RESET);
}
static void exynos_pcie_power_on_phy(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_phy_readl(ep, PCIE_PHY_COMMON_POWER);
	val &= ~PCIE_PHY_COMMON_PD_CMN;
	exynos_phy_writel(ep, val, PCIE_PHY_COMMON_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV0_POWER);
	val &= ~PCIE_PHY_TRSV0_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV0_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV1_POWER);
	val &= ~PCIE_PHY_TRSV1_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV1_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV2_POWER);
	val &= ~PCIE_PHY_TRSV2_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV2_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV3_POWER);
	val &= ~PCIE_PHY_TRSV3_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV3_POWER);
}

static void exynos_pcie_power_off_phy(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_phy_readl(ep, PCIE_PHY_COMMON_POWER);
	val |= PCIE_PHY_COMMON_PD_CMN;
	exynos_phy_writel(ep, val, PCIE_PHY_COMMON_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV0_POWER);
	val |= PCIE_PHY_TRSV0_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV0_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV1_POWER);
	val |= PCIE_PHY_TRSV1_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV1_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV2_POWER);
	val |= PCIE_PHY_TRSV2_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV2_POWER);

	val = exynos_phy_readl(ep, PCIE_PHY_TRSV3_POWER);
	val |= PCIE_PHY_TRSV3_PD_TSV;
	exynos_phy_writel(ep, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_init_phy(struct exynos_pcie *ep)
{
	/* DCC feedback control off */
	exynos_phy_writel(ep, 0x29, PCIE_PHY_DCC_FEEDBACK);

	/* set TX/RX impedance */
	exynos_phy_writel(ep, 0xd5, PCIE_PHY_IMPEDANCE);

	/* set 50 MHz PHY clock */
	exynos_phy_writel(ep, 0x14, PCIE_PHY_PLL_DIV_0);
	exynos_phy_writel(ep, 0x12, PCIE_PHY_PLL_DIV_1);

	/* set TX differential output for lane 0 */
	exynos_phy_writel(ep, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);

	/* set TX pre-emphasis level control for lane 0 to minimum */
	exynos_phy_writel(ep, 0x0, PCIE_PHY_TRSV0_EMP_LVL);

	/* set RX clock and data recovery bandwidth */
	exynos_phy_writel(ep, 0xe7, PCIE_PHY_PLL_BIAS);
	exynos_phy_writel(ep, 0x82, PCIE_PHY_TRSV0_RXCDR);
	exynos_phy_writel(ep, 0x82, PCIE_PHY_TRSV1_RXCDR);
	exynos_phy_writel(ep, 0x82, PCIE_PHY_TRSV2_RXCDR);
	exynos_phy_writel(ep, 0x82, PCIE_PHY_TRSV3_RXCDR);

	/* change TX pre-emphasis level control for lanes */
	exynos_phy_writel(ep, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
	exynos_phy_writel(ep, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
	exynos_phy_writel(ep, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
	exynos_phy_writel(ep, 0x39, PCIE_PHY_TRSV3_EMP_LVL);

	/* set LVCC */
	exynos_phy_writel(ep, 0x20, PCIE_PHY_TRSV0_LVCC);
	exynos_phy_writel(ep, 0xa0, PCIE_PHY_TRSV1_LVCC);
	exynos_phy_writel(ep, 0xa0, PCIE_PHY_TRSV2_LVCC);
	exynos_phy_writel(ep, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
{
	struct pcie_port *pp = &ep->pp;
	struct device *dev = pp->dev;

	if (ep->reset_gpio >= 0)
		devm_gpio_request_one(dev, ep->reset_gpio,
				      GPIOF_OUT_INIT_HIGH, "RESET");
}
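
/*
 * Link bring-up sequence used from exynos_pcie_host_init(): assert the core
 * reset, reset/power-up/configure the PHY, pulse the PHY common reset,
 * release the core reset, program the root complex via dw_pcie_setup_rc(),
 * drive the optional reset GPIO, then enable LTSSM and wait for the link
 * through dw_pcie_wait_for_link().
 */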
static int exynos_pcie_establish_link(struct exynos_pcie *ep)
{
	struct pcie_port *pp = &ep->pp;
	struct device *dev = pp->dev;
	u32 val;

	if (dw_pcie_link_up(pp)) {
		dev_err(dev, "Link already up\n");
		return 0;
	}

	exynos_pcie_assert_core_reset(ep);
	exynos_pcie_assert_phy_reset(ep);
	exynos_pcie_deassert_phy_reset(ep);
	exynos_pcie_power_on_phy(ep);
	exynos_pcie_init_phy(ep);

	/* pulse for common reset */
	exynos_blk_writel(ep, 1, PCIE_PHY_COMMON_RESET);
	udelay(500);
	exynos_blk_writel(ep, 0, PCIE_PHY_COMMON_RESET);

	exynos_pcie_deassert_core_reset(ep);
	dw_pcie_setup_rc(pp);
	exynos_pcie_assert_reset(ep);

	/* assert LTSSM enable */
	exynos_elb_writel(ep, PCIE_ELBI_LTSSM_ENABLE,
			  PCIE_APP_LTSSM_ENABLE);

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pp))
		return 0;

	while (exynos_phy_readl(ep, PCIE_PHY_PLL_LOCKED) == 0) {
		val = exynos_blk_readl(ep, PCIE_PHY_PLL_LOCKED);
		dev_info(dev, "PLL Locked: 0x%x\n", val);
	}

	exynos_pcie_power_off_phy(ep);
	return -ETIMEDOUT;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
	u32 val;

	val = exynos_elb_readl(ep, PCIE_IRQ_PULSE);
	exynos_elb_writel(ep, val, PCIE_IRQ_PULSE);
}

static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
	u32 val;

	/* enable INTX interrupt */
	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
		IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
	exynos_elb_writel(ep, val, PCIE_IRQ_EN_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
	struct exynos_pcie *ep = arg;

	exynos_pcie_clear_irq_pulse(ep);
	return IRQ_HANDLED;
}

static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
	struct exynos_pcie *ep = arg;
	struct pcie_port *pp = &ep->pp;

	return dw_handle_msi_irq(pp);
}
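
/*
 * MSI handling itself is delegated to the DesignWare core
 * (dw_pcie_msi_init()/dw_handle_msi_irq()); locally the driver only sets
 * the IRQ_MSI_ENABLE bit in PCIE_IRQ_EN_LEVEL.
 */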
static void exynos_pcie_msi_init(struct exynos_pcie *ep)
{
	struct pcie_port *pp = &ep->pp;
	u32 val;

	dw_pcie_msi_init(pp);

	/* enable MSI interrupt */
	val = exynos_elb_readl(ep, PCIE_IRQ_EN_LEVEL);
	val |= IRQ_MSI_ENABLE;
	exynos_elb_writel(ep, val, PCIE_IRQ_EN_LEVEL);
}

static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
{
	exynos_pcie_enable_irq_pulse(ep);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		exynos_pcie_msi_init(ep);
}
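
/*
 * These accessors wrap every dbi_base read/write in the ELBI sideband DBI
 * read/write mode toggles defined above, which appears to be required for
 * DBI access on this SoC.
 */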
static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	struct exynos_pcie *ep = to_exynos_pcie(pp);
	u32 val;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	val = readl(pp->dbi_base + reg);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return val;
}

static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
	struct exynos_pcie *ep = to_exynos_pcie(pp);

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	writel(val, pp->dbi_base + reg);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
}
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				   u32 *val)
{
	struct exynos_pcie *ep = to_exynos_pcie(pp);
	int ret;

	exynos_pcie_sideband_dbi_r_mode(ep, true);
	ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_r_mode(ep, false);
	return ret;
}

static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
				   u32 val)
{
	struct exynos_pcie *ep = to_exynos_pcie(pp);
	int ret;

	exynos_pcie_sideband_dbi_w_mode(ep, true);
	ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
	exynos_pcie_sideband_dbi_w_mode(ep, false);
	return ret;
}
static int exynos_pcie_link_up(struct pcie_port *pp)
{
	struct exynos_pcie *ep = to_exynos_pcie(pp);
	u32 val;

	val = exynos_elb_readl(ep, PCIE_ELBI_RDLH_LINKUP);
	if (val == PCIE_ELBI_LTSSM_ENABLE)
		return 1;

	return 0;
}
static void exynos_pcie_host_init(struct pcie_port *pp)
{
	struct exynos_pcie *ep = to_exynos_pcie(pp);

	exynos_pcie_establish_link(ep);
	exynos_pcie_enable_interrupts(ep);
}

static struct pcie_host_ops exynos_pcie_host_ops = {
	.readl_rc = exynos_pcie_readl_rc,
	.writel_rc = exynos_pcie_writel_rc,
	.rd_own_conf = exynos_pcie_rd_own_conf,
	.wr_own_conf = exynos_pcie_wr_own_conf,
	.link_up = exynos_pcie_link_up,
	.host_init = exynos_pcie_host_init,
};
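
/*
 * Interrupt wiring as requested below: platform IRQ index 1 is the "pulse"
 * interrupt (INTx events, acknowledged via PCIE_IRQ_PULSE) and index 0 is
 * the MSI interrupt; both are requested as shared lines with the
 * exynos_pcie pointer as the cookie.
 */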
static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
				       struct platform_device *pdev)
{
	struct pcie_port *pp = &ep->pp;
	struct device *dev = pp->dev;
	int ret;

	pp->irq = platform_get_irq(pdev, 1);
	if (!pp->irq) {
		dev_err(dev, "failed to get irq\n");
		return -ENODEV;
	}
	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
			       IRQF_SHARED, "exynos-pcie", ep);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq(pdev, 0);
		if (!pp->msi_irq) {
			dev_err(dev, "failed to get msi irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(dev, pp->msi_irq,
				       exynos_pcie_msi_irq_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       "exynos-pcie", ep);
		if (ret) {
			dev_err(dev, "failed to request msi irq\n");
			return ret;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &exynos_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct exynos_pcie *ep;
	struct pcie_port *pp;
	struct device_node *np = dev->of_node;
	struct resource *elbi_base;
	struct resource *phy_base;
	struct resource *block_base;
	int ret;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pp = &ep->pp;
	pp->dev = dev;

	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);

	ep->clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(ep->clk)) {
		dev_err(dev, "Failed to get pcie rc clock\n");
		return PTR_ERR(ep->clk);
	}
	ret = clk_prepare_enable(ep->clk);
	if (ret)
		return ret;

	ep->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(ep->bus_clk)) {
		dev_err(dev, "Failed to get pcie bus clock\n");
		ret = PTR_ERR(ep->bus_clk);
		goto fail_clk;
	}
	ret = clk_prepare_enable(ep->bus_clk);
	if (ret)
		goto fail_clk;

	elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ep->elbi_base = devm_ioremap_resource(dev, elbi_base);
	if (IS_ERR(ep->elbi_base)) {
		ret = PTR_ERR(ep->elbi_base);
		goto fail_bus_clk;
	}

	phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ep->phy_base = devm_ioremap_resource(dev, phy_base);
	if (IS_ERR(ep->phy_base)) {
		ret = PTR_ERR(ep->phy_base);
		goto fail_bus_clk;
	}

	block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	ep->block_base = devm_ioremap_resource(dev, block_base);
	if (IS_ERR(ep->block_base)) {
		ret = PTR_ERR(ep->block_base);
		goto fail_bus_clk;
	}

	ret = exynos_add_pcie_port(ep, pdev);
	if (ret < 0)
		goto fail_bus_clk;

	platform_set_drvdata(pdev, ep);
	return 0;

fail_bus_clk:
	clk_disable_unprepare(ep->bus_clk);
fail_clk:
	clk_disable_unprepare(ep->clk);
	return ret;
}
static int __exit exynos_pcie_remove(struct platform_device *pdev)
{
	struct exynos_pcie *ep = platform_get_drvdata(pdev);

	clk_disable_unprepare(ep->bus_clk);
	clk_disable_unprepare(ep->clk);

	return 0;
}

static const struct of_device_id exynos_pcie_of_match[] = {
	{ .compatible = "samsung,exynos5440-pcie", },
	{},
};

static struct platform_driver exynos_pcie_driver = {
	.remove		= __exit_p(exynos_pcie_remove),
	.driver = {
		.name	= "exynos-pcie",
		.of_match_table = exynos_pcie_of_match,
	},
};

/* Exynos PCIe driver does not allow module unload */

static int __init exynos_pcie_init(void)
{
	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
}
subsys_initcall(exynos_pcie_init);